| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
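For orientation, the sketch below shows how one record under the schema above might be represented in Python. The `Row` container is purely illustrative and not part of the dataset; the example values are copied from the first row of the table, with the long prefix/middle/suffix text elided.

```python
# Hypothetical container for one record of the table above; field names and
# ranges come from the header, example values from the first row below.
from dataclasses import dataclass

@dataclass
class Row:
    repo_name: str   # 5-100 chars
    path: str        # 4-231 chars
    language: str    # single class ("Python")
    license: str     # one of 15 license identifiers
    size: int        # int64, 6 to 947k
    score: float     # float64, 0 to 0.34
    prefix: str      # 0 to 8.16k chars
    middle: str      # 3 to 512 chars
    suffix: str      # 0 to 8.17k chars

example = Row(
    repo_name="spektom/incubator-airflow",
    path="tests/test_core.py",
    language="Python",
    license="apache-2.0",
    size=18421,
    score=0.000869,
    prefix="# Licensed to the Apache Software Foundation (ASF) ...",  # elided
    middle="...",  # elided
    suffix="...",  # elided
)
```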
| spektom/incubator-airflow | tests/test_core.py | Python | apache-2.0 | 18,421 | 0.000869 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import signal
import unittest
from datetime import timedelta
from time import sleep
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from airflow import DAG, exceptions, settings
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagBag, DagRun, TaskFail, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.settings import Session
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_tests'
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
class TestCore(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
session = Session()
session.query(DagRun).filter(
DagRun.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id == TEST_DAG_ID).delete(
synchronize_session=False)
session.commit()
session.close()
def test_check_operators(self):
conn_id = "sqlite_default"
captain_hook = BaseHook.get_hook(conn_id=conn_id) # quite funny :D
captain_hook.run("CREATE TABLE operator_test_table (a, b)")
captain_hook.run("insert into operator_test_table values (1,2)")
op = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
op = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captain_hook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
msg = 'Invalid arguments were passed to BashOperator (task_id: test_illegal_args).'
with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
with self.assertWarns(PendingDeprecationWarning) as warning:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
assert any(msg in str(w) for w in warning.warnings)
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
with self.assertRaises(AirflowException) as ctx:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
str(ctx.exception))
def test_bash_operator(self):
op = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
op = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
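# Build a sleep duration unique to this test run (it embeds the PID) so a
# surviving 'sleep' subprocess can be identified by its command line below.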
sleep_time = "100%d" % os.getpid()
op = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
op = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
op.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_dryrun(self):
op = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
op.dry_run()
def test_sqlite(self):
import airflow.providers.sqlite.operators.sqlite
op = airflow.providers.sqlite.operators.sqlite.SqliteOp
|
| AndrewPeelMV/Blender2.78c | 2.78/scripts/addons/blender_cloud/texture_browser.py | Python | gpl-2.0 | 38,526 | 0.001609 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import asyncio
import logging
import threading
import os
import bpy
import bgl
import blf
import pillarsdk
from . import async_loop, pillar, cache, blender, utils
REQUIRED_ROLES_FOR_TEXTURE_BROWSER = {'subscriber', 'demo'}
MOUSE_SCROLL_PIXELS_PER_TICK = 50
ICON_WIDTH = 128
ICON_HEIGHT = 128
TARGET_ITEM_WIDTH = 400
TARGET_ITEM_HEIGHT = 128
ITEM_MARGIN_X = 5
ITEM_MARGIN_Y = 5
ITEM_PADDING_X = 5
library_path = '/tmp'
library_icons_path = os.path.join(os.path.dirname(__file__), "icons")
log = logging.getLogger(__name__)
class SpecialFolderNode(pillarsdk.Node):
NODE_TYPE = 'SPECIAL'
class UpNode(SpecialFolderNode):
NODE_TYPE = 'UP'
def __init__(self):
super().__init__()
self['_id'] = 'UP'
self['node_type'] = self.NODE_TYPE
class ProjectNode(SpecialFolderNode):
NODE_TYPE = 'PROJECT'
def __init__(self, project):
super().__init__()
assert isinstance(project, pillarsdk.Project), 'wrong type for project: %r' % type(project)
self.merge(project.to_dict())
self['node_type'] = self.NODE_TYPE
class MenuItem:
"""GUI menu item for the 3D View GUI."""
icon_margin_x = 4
icon_margin_y = 4
text_margin_x = 6
text_height = 16
text_width = 72
DEFAULT_ICONS = {
'FOLDER': os.path.join(library_icons_path, 'folder.png'),
'SPINNER': os.path.join(library_icons_path, 'spinner.png'),
}
FOLDER_NODE_TYPES = {'group_texture', 'group_hdri', UpNode.NODE_TYPE, ProjectNode.NODE_TYPE}
SUPPORTED_NODE_TYPES = {'texture', 'hdri'}.union(FOLDER_NODE_TYPES)
def __init__(self, node, file_desc, thumb_path: str, label_text):
self.log = logging.getLogger('%s.MenuItem' % __name__)
if node['node_type'] not in self.SUPPORTED_NODE_TYPES:
self.log.info('Invalid node type in node: %s', node)
raise TypeError('Node of type %r not supported; supported are %r.' % (
node['node_type'], self.SUPPORTED_NODE_TYPES))
assert isinstance(node, pillarsdk.Node), 'wrong type for node: %r' % type(node)
assert isinstance(node['_id'], str), 'wrong type for node["_id"]: %r' % type(node['_id'])
self.node = node # pillarsdk.Node, contains 'node_type' key to indicate type
self.file_desc = file_desc  # pillarsdk.File object, or None if a 'folder' node.
self.label_text = label_text
self._thumb_path = ''
self.icon = None
self._is_folder = node['node_type'] in self.FOLDER_NODE_TYPES
self._is_spinning = False
# Determine sorting order.
# by default, sort all the way at the end and folders first.
self._order = 0 if self._is_folder else 10000
if node and node.properties and node.properties.order is not None:
self._order = node.properties.order
self.thumb_path = thumb_path
# Updated when drawing the image
self.x = 0
self.y = 0
self.width = 0
self.height = 0
def sort_key(self):
"""Key for sorting lists of MenuItems."""
return self._order, self.label_text
@property
def thumb_path(self) -> str:
return self._thumb_path
@thumb_path.setter
def thumb_path(self, new_thumb_path: str):
self._is_spinning = new_thumb_path == 'SPINNER'
self._thumb_path = self.DEFAULT_ICONS.get(new_thumb_path, new_thumb_path)
if self._thumb_path:
self.icon = bpy.data.images.load(filepath=self._thumb_path)
else:
self.icon = None
@property
def node_uuid(self) -> str:
return self.node['_id']
def represents(self, node) -> bool:
"""Returns True iff this MenuItem represents the given node."""
node_uuid = node['_id']
return self.node_uuid == node_uuid
def update(self, node, file_desc, thumb_path: str, label_text=None):
# We can get updated information about our Node, but a MenuItem should
# always represent one node, and it shouldn't be shared between nodes.
if self.node_uuid != node['_id']:
raise ValueError("Don't change the node ID this MenuItem reflects, "
"just create a new one.")
self.node = node
self.file_desc = file_desc # pillarsdk.File object, or None if a 'folder' node.
self.thumb_path = thumb_path
if label_text is not None:
self.label_text = label_text
@property
def is_folder(self) -> bool:
return self._is_folder
@property
def is_spinning(self) -> bool:
return self._is_spinning
def update_placement(self, x, y, width, height):
"""Use OpenGL to draw this one menu item."""
self.x = x
self.y = y
self.width = width
self.height = height
def draw(self, highlighted: bool):
bgl.glEnable(bgl.GL_BLEND)
if highlighted:
bgl.glColor4f(0.555, 0.555, 0.555, 0.8)
else:
bgl.glColor4f(0.447, 0.447, 0.447, 0.8)
bgl.glRectf(self.x, self.y, self.x + self.width, self.y + self.height)
texture = self.icon
err = texture.gl_load(filter=bgl.GL_NEAREST, mag=bgl.GL_NEAREST)
assert not err, 'OpenGL error: %i' % err
bgl.glColor4f(0.0, 0.0, 1.0, 0.5)
# bgl.glLineWidth(1.5)
# ------ TEXTURE ---------#
bgl.glBindTexture(bgl.GL_TEXTURE_2D, texture.bindcode[0])
bgl.glEnable(bgl.GL_TEXTURE_2D)
bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)
bgl.glColor4f(1, 1, 1, 1)
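# Draw the icon as a textured quad: texture coords (0,0)-(1,1) mapped onto an
# ICON_WIDTH x ICON_HEIGHT rectangle offset by icon_margin_x from the item's left edge.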
bgl.glBegin(bgl.GL_QUADS)
bgl.glTexCoord2d(0, 0)
bgl.glVertex2d(self.x + self.icon_margin_x, self.y)
bgl.glTexCoord2d(0, 1)
bgl.glVertex2d(self.x + self.icon_margin_x, self.y + ICON_HEIGHT)
bgl.glTexCoord2d(1, 1)
bgl.glVertex2d(self.x + self.icon_margin_x + ICON_WIDTH, self.y + ICON_HEIGHT)
bgl.glTexCoord2d(1, 0)
bgl.glVertex2d(self.x + self.icon_margin_x + ICON_WIDTH, self.y)
bgl.glEnd()
bgl.glDisable(bgl.GL_TEXTURE_2D)
bgl.glDisable(bgl.GL_BLEND)
texture.gl_free()
# draw some text
font_id = 0
blf.position(font_id,
self.x + self.icon_margin_x + ICON_WIDTH + self.text_margin_x,
self.y + ICON_HEIGHT * 0.5 - 0.25 * self.text_height, 0)
blf.size(font_id, self.text_height, self.text_width)
blf.draw(font_id, self.label_text)
def hits(self, mouse_x: int, mouse_y: int) -> bool:
return self.x < mouse_x < self.x + self.width and self.y < mouse_y < self.y + self.height
class BlenderCloudBrowser(pillar.PillarOperatorMixin,
async_loop.AsyncModalOperatorMixin,
bpy.types.Operator):
bl_idname = 'pillar.browser'
bl_label = 'Blender Cloud Texture Browser'
_draw_handle = None
current_path = pillar.CloudPath('/')
project_name = ''
# This contains a stack of Node objects that lead up to the currently browsed node.
path_stack = []
# This contains a stack of MenuItem objects that lead up to the currently browsed node.
menu_item_stack = []
timer = None
log = logging.getLogger('%s.BlenderCloudBrowser' % __name__)
_menu_item_lock = threading.Lock()
current_display_content = []
|
| jrversteegh/softsailor | deps/swig-2.0.4/Examples/python/smartptr/runme.py | Python | gpl-3.0 | 1,069 | 0.01029 |
# file: runme.py
# This file illustrates the proxy class C++ interface generated
# by SWIG.
import example
# ----- Object creation -----
print "Creating some objects:"
cc = example.Circle(10)
c = example.ShapePtr(cc)
print " Created circle", c
ss = example.Square(10)
s = example.ShapePtr(ss)
print " Created square", s
# ----- Access a static member -----
print "\nA total of", example.cvar.Shape_nshapes,"shapes were created"
# ----- Member data access -----
# Set the location of the object
c.x = 20
c.y = 30
s.x = -10
s.y = 5
print "\nHere is their current position:"
print " Circle = (%f, %f)" % (c.x,c.y)
print " Square = (%f, %f)" % (s.x,s.y)
# ----- Call some methods -----
print "\nHere are some properties of the shapes:"
for o in [c,s]:
print " ", o
print " area = ", o.area()
print " perimeter = ", o.perimeter()
print "\nGuess I'll clean up now"
# Note: this invokes the virtual destructor
del c
del s
del cc
del ss
s = 3
print example.cvar.Shape_nshapes,"shapes remain"
print "Goodbye"
|
| mariusvniekerk/impyla | impala/dbapi/beeswax.py | Python | apache-2.0 | 9,904 | 0 |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import getpass
import time
import sys
import six
import os
from impala.dbapi.interface import Connection, Cursor, _bind_parameters
from impala._rpc import beeswax as rpc
from impala.error import NotSupportedError, ProgrammingError, OperationalError
from impala._thrift_api.beeswax import QueryState
class BeeswaxConnection(Connection):
# PEP 249
def __init__(self, service, default_db=None):
self.service = service
self.default_db = default_db
self.default_query_options = {}
def close(self):
"""Close the session and the Thrift transport."""
# PEP 249
rpc.close_service(self.service)
def commit(self):
"""Impala doesn't support transactions; does nothing."""
# PEP 249
pass
def rollback(self):
"""Impala doesn't support transactions; raises NotSupportedError"""
# PEP 249
raise NotSupportedError
def cursor(self, user=None, configuration=None):
# PEP 249
if user is None:
user = getpass.getuser()
options = rpc.build_default_query_options_dict(self.service)
for opt in options:
self.default_query_options[opt.key.upper()] = opt.value
cursor = BeeswaxCursor(self.service, user)
if self.default_db is not None:
cursor.execute('USE %s' % self.default_db)
return cursor
def reconnect(self):
rpc.reconnect(self.service)
class BeeswaxCursor(Cursor):
# PEP 249
# Beeswax does not support sessions
def __init__(self, service, user):
self.service = service
self.user = user
self._last_operation_string = None
self._last_operation_handle = None
self._last_operation_active = False
self._buffersize = None
self._buffer = []
# initial values, per PEP 249
self._description = None
self._rowcount = -1
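# Mapping of Beeswax query state names (e.g. 'FINISHED', 'EXCEPTION') to their
# enum values, used when polling in _wait_to_finish().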
self.query_state = QueryState._NAMES_TO_VALUES
@property
def description(self):
# PEP 249
return self._description
@property
def rowcount(self):
# PEP 249
return self._rowcount
@property
def query_string(self):
return self._last_operation_string
def get_arraysize(self):
# PEP 249
return self._buffersize if self._buffersize else 1
def set_arraysize(self, arraysize):
# PEP 249
self._buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
@property
def buffersize(self):
# this is for internal use. it provides an alternate default value for
# the size of the buffer, so that calling .next() will read multiple
# rows into a buffer if arraysize hasn't been set. (otherwise, we'd
# get an unbuffered impl because the PEP 249 default value of arraysize
# is 1)
return self._buffersize if self._buffersize else 1024
@property
def has_result_set(self):
return (self._last_operation_handle is not None and
rpc.expect_result_metadata(self._last_operation_string))
def close(self):
# PEP 249
pass
def cancel_operation(self):
if self._last_operation_active:
self._last_operation_active = False
rpc.cancel_query(self.service, self._last_operation_handle)
def close_operation(self):
if self._last_operation_active:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
def execute(self, operation, parameters=None, configuration=None):
# PEP 249
if configuration is None:
configuration = {}
def op():
if parameters:
self._last_operation_string = _bind_parameters(operation,
parameters)
else:
self._last_operation_string = operation
query = rpc.create_beeswax_query(self._last_operation_string,
self.user, configuration)
self._last_operation_handle = rpc.execute_statement(self.service,
query)
self._execute_sync(op)
def _execute_sync(self, operation_fn):
# operation_fn should set self._last_operation_string and
# self._last_operation_handle
self._reset_state()
operation_fn()
self._last_operation_active = True
self._wait_to_finish() # make execute synchronous
if self.has_result_set:
schema = rpc.get_results_metadata(
self.service, self._last_operation_handle)
self._description = [tuple([tup.name, tup.type.upper()] +
[None, None, None, None, None])
for tup in schema]
else:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
def _reset_state(self):
self._buffer = []
self._rowcount = -1
self._description = None
if self._last_operation_active:
self._last_operation_active = False
rpc.close_query(self.service, self._last_operation_handle)
self._last_operation_string = None
self._last_operation_handle = None
def _wait_to_finish(self):
loop_start = time.time()
while True:
operation_state = rpc.get_query_state(
self.service, self._last_operation_handle)
if operation_state == self.query_state["FINISHED"]:
break
elif operation_state == self.query_state["EXCEPTION"]:
raise OperationalError(self.get_log())
time.sleep(self._get_sleep_interval(loop_start))
def _get_sleep_interval(self, start_time):
"""Returns a step function of time to sleep in seconds before polling
again. Maximum sleep is 1s, minimum is 0.1s"""
elapsed = time.time() - start_time
if elapsed < 10.0:
return 0.1
elif elapsed < 60.0:
return 0.5
return 1.0
def executemany(self, operation, seq_of_parameters):
# PEP 249
for parameters in seq_of_parameters:
self.execute(operation, parameters)
if self.has_result_set:
raise ProgrammingError("Operations that have result sets are "
"not allowed with executemany.")
def fetchone(self):
# PEP 249
if not self.has_result_set:
raise ProgrammingError("Tried to fetch but no results.")
try:
return next(self)
except StopIteration:
return None
def fetchmany(self, size=None):
# PEP 249
if not self.has_result_set:
raise ProgrammingError("Tried to fetch but no results.")
if size is None:
size = self.arraysize
local_buffer = []
i = 0
while i < size:
try:
local_buffer.append(next(self))
i += 1
except StopIteration:
break
return local_buffer
def fetchall(self):
# PEP 249
try:
return list(self)
except StopIteration:
return []
def setinputsizes(self, sizes):
# PEP 249
pass
def setoutputsize(self, size, column=None):
# PEP 249
|
| MarxMustermann/OfMiceAndMechs | src/itemFolder/obsolete/commandBook.py | Python | gpl-3.0 | 689 | 0.001451 |
import src
class CommandBook(src.items.Item):
type = "CommandBook"
"""
call superclass constructor with modified parameters
"""
def __init__(self):
super().__init__(display="cb")
self.name = "command book"
self.bolted = False
self.walkable = True
totalCommands = 0
self.contents = []
self.attributesToStore.extend(["contents"])
def getState(self):
state = super().getState()
try:
state["contents"] = self.availableChallenges
state["knownBlueprints"] = self.knownBlueprints
except:
pass
return state
src.items.addType(CommandBook)
|
| Gaia3D/QGIS | python/plugins/processing/algs/gdal/ogr2ogrclipextent.py | Python | gpl-2.0 | 3,489 | 0.001433 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
ogr2ogrclipextent.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterExtent
from processing.core.outputs import OutputVector
from processing.tools.system import isWindows
from processing.algs.gdal.OgrAlgorithm import OgrAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class Ogr2OgrClipExtent(OgrAlgorithm):
OUTPUT_LAYER = 'OUTPUT_LAYER'
INPUT_LAYER = 'INPUT_LAYER'
CLIP_EXTENT = 'CLIP_EXTENT'
OPTIONS = 'OPTIONS'
def defineCharacteristics(self):
self.name = 'Clip vectors by extent'
self.group = '[OGR] Geoprocessing'
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY], False))
self.addParameter(ParameterExtent(self.CLIP_EXTENT,
self.tr('Clip extent')))
self.addParameter(ParameterString(self.OPTIONS,
self.tr('Additional creation options'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_LAYER, self.tr('Output layer')))
def getConsoleCommands(self):
inLayer = self.getParameterValue(self.INPUT_LAYER)
ogrLayer = self.ogrConnectionString(inLayer)[1:-1]
clipExtent = self.getParameterValue(self.CLIP_EXTENT)
ogrclipExtent = self.ogrConnectionString(clipExtent)
output = self.getOutputFromName(self.OUTPUT_LAYER)
outFile = output.value
output = self.ogrConnectionString(outFile)
options = unicode(self.getParameterValue(self.OPTIONS))
arguments = []
regionCoords = ogrclipExtent.split(',')
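# The extent value is ordered xmin,xmax,ymin,ymax; ogr2ogr's -spat switch
# expects xmin ymin xmax ymax, hence the reordering below.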
arguments.append('-spat')
arguments.append(regionCoords[0])
arguments.append(regionCoords[2])
arguments.append(regionCoords[1])
arguments.append(regionCoords[3])
arguments.append('-clipsrc spat_extent')
if len(options) > 0:
arguments.append(options)
arguments.append(output)
arguments.append(ogrLayer)
arguments.append(self.ogrLayerName(inLayer))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'ogr2ogr.exe',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
return commands
|
| onoga/toolib | toolib/wx/grid/test/testDeleteSelection.py | Python | gpl-2.0 | 604 | 0.023179 |
import wx
from toolib.wx.TestApp import TestApp
from toolib.wx.grid.Grid import Grid
from toolib.wx.grid.table.List2dTable import List2dTable
from toolib.wx.grid.MDeleteSelection import MDeleteSelection
class MyGrid(Grid, MDeleteSelection):
def __init__(self, *args, **kwargs):
Grid.__init__(self, *args, **kwargs)
MDeleteSelection.__init__(self)
if __name__ == '__main__':
g = None
def oninit(self):
self.grid = MyGrid(self, -1)
self.grid.SetTable(List2dTable())
self.grid.AppendRows(4)
self.grid.AppendCols(4)
def ondestroy(self):
pass
TestApp(oninit, ondestroy).MainLoop()
|
| smlacombe/sageo | app/controllers/side.py | Python | gpl-3.0 | 1,232 | 0.012175 |
#
# Copyright (C) 2013 Savoir-Faire Linux Inc.
#
# This file is part of Sageo
#
# Sageo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sageo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sageo. If not, see <http://www.gnu.org/licenses/>
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, Module, current_app
import app.snapins as snapins
#from app.snapins.snapin import SnapinBase
sageo = current_app
def side():
snapin_objects = {}
for snapin in snapins.__all__:
#import ipdb;ipdb.set_trace()
__import__('app.snapins.' + snapin + '.' + snapin)
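# Walk app.snapins.<snapin>.<snapin> down to the class named <snapin> and instantiate it.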
snapin_objects[snapin] = getattr(getattr(getattr(snapins, snapin), snapin),snapin)()
return snapin_objects
|
| stlemme/python-dokuwiki-export | create-thumbnail.py | Python | mit | 575 | 0.034783 |
import logging
from catalog import ThumbnailGenerator
from wiki import DokuWikiRemote
if __name__ == '__main__':
import sys
import wikiconfig
filename = 'thumb.png'
if len(sys.argv) < 2:
sys.exit('Usage: %s :wiki:thumbnail.png [ thumb.png ]' % sys.argv[0])
thumbname = sys.argv[1]
logging.info("Connecting to remote DokuWiki at %s" % wikiconfig.url)
dw = DokuWikiRemote(wikiconfig.url, wikiconfig.user, wikiconfig.passwd)
thumbGen = ThumbnailGenerator(dw)
if len(sys.argv) > 2:
filename = sys.argv[2]
thumbGen.generate_thumb(thumbname, filename)
|
| tdruez/django-registration | registration/models.py | Python | bsd-3-clause | 10,792 | 0 |
"""
Model and manager used by the two-step (sign up, then activate)
workflow. If you're not using that workflow, you don't need to have
'registration' in your INSTALLED_APPS.
This is provided primarily for backwards-compatibility with existing
installations; new installs of django-registration should look into
the HMAC activation workflow in registration.backends.hmac, which
provides a two-step process but requires no models or storage of the
activation key.
"""
import datetime
import hashlib
import re
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db import transaction
from django.template.loader import render_to_string
from django.utils.crypto import get_random_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
SHA1_RE = re.compile('^[a-f0-9]{40}$')
class RegistrationManager(models.Manager):
"""
Custom manager for the ``RegistrationProfile`` model.
The methods defined here provide shortcuts for account creation
and activation (including generation and emailing of activation
keys), and for cleaning out expired inactive accounts.
"""
def activate_user(self, activation_key):
"""
Validate an activation key and activate the corresponding
``User`` if valid.
If the key is valid and has not expired, return the ``User``
after activating.
If the key is not valid or has expired, return ``False``.
If the key is valid but the ``User`` is already active,
return ``False``.
To prevent reactivation of an account which has been
deactivated by site administrators, the activation key is
reset to the string constant ``RegistrationProfile.ACTIVATED``
after successful activation.
"""
# Make sure the key we're trying conforms to the pattern of a
# SHA1 hash; if it doesn't, no point trying to look it up in
# the database.
if SHA1_RE.search(activation_key):
try:
profile = self.get(activation_key=activation_key)
except self.model.DoesNotExist:
return False
if not profile.activation_key_expired():
user = profile.user
user.is_active = True
user.save()
profile.activation_key = self.model.ACTIVATED
profile.save()
return user
return False
def create_inactive_user(self, username, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
User = get_user_model()
user_kwargs = {
User.USERNAME_FIELD: username,
'email': email,
'password': password,
}
new_user = User.objects.create_user(**user_kwargs)
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.atomic(create_inactive_user)
def create_profile(self, user):
"""
Create a ``RegistrationProfile`` for a given
``User``, and return the ``RegistrationProfile``.
The activation key for the ``RegistrationProfile`` will be a
SHA1 hash, generated from a combination of the ``User``'s
username and a random salt.
"""
User = get_user_model()
username = str(getattr(user, User.USERNAME_FIELD))
hash_input = (get_random_string(5) + username).encode('utf-8')
activation_key = hashlib.sha1(hash_input).hexdigest()
return self.create(user=user,
activation_key=activation_key)
@transaction.atomic
def delete_expired_users(self):
"""
Remove expired instances of ``RegistrationProfile`` and their
associated ``User``s.
Accounts to be deleted are identified by searching for
instances of ``RegistrationProfile`` with expired activation
keys, and then checking to see if their associated ``User``
instances have the field ``is_active`` set to ``False``; any
``User`` who is both inactive and has an expired activation
key will be deleted.
It is recommended that this method be executed regularly as
part of your routine site maintenance; this application
provides a custom management command which will call this
method, accessible as ``manage.py cleanupregistration``.
Regularly clearing out accounts which have never been
activated serves two useful purposes:
1. It alleviates the occasional need to reset a
``RegistrationProfile`` and/or re-send an activation email
when a user does not receive or does not act upon the
initial activation email; since the account will be
deleted, the user will be able to simply re-register and
receive a new activation key.
2. It prevents the possibility of a malicious user registering
one or more accounts and never activating them (thus
denying the use of those usernames to anyone else); since
those accounts will be deleted, the usernames will become
available for use again.
If you have a troublesome ``User`` and wish to disable their
account while keeping it in the database, simply delete the
associated ``RegistrationProfile``; an inactive ``User`` which
does not have an associated ``RegistrationProfile`` will not
be deleted.
"""
for profile in self.all():
if profile.activation_key_expired():
user = profile.user
if not user.is_active:
profile.delete()
user.delete()
@python_2_unicode_compatible
class RegistrationProfile(models.Model):
"""
A simple profile which stores an activation key for use during
user account registration.
Generally, you will not want to interact directly with instances
of this model; the provided manager includes methods
for creating and activating new accounts, as well as for cleaning
out accounts which have never been activated.
While it is possible to use this model as the value of the
``AUTH_PROFILE_MODULE`` setting, it's not recommended that you do
so. This model's sole purpose is to store data temporarily during
account registration and activation.
"""
ACTIVATED = u"ALREADY_ACTIVATED"
user = models.OneToOneField(settings.AUTH_USER_MODEL,
verbose_name=_('user'))
activation_key = models.CharField(_('activation key'), max_length=40)
objects = RegistrationManager()
class Meta:
verbose_name = _('registration profile')
verbose_name_plural = _('registration profiles')
def __str__(self):
return "Registration information for %s" % self.user
def activation_key_expired(self):
"""
Determine whether this ``RegistrationProfile``'s activation
key has expired, returning a boolean -- ``True`` if the key
has expired.
Key expiration is determined by a two-step process:
1. If the user has already activated, the key will have been
reset to the string constant ``ACTIVATED``. Re-activating
is not permitted, and so this method returns ``True`` in
this case.
2. Otherwise, the date the user signed up is incremented by
the number of days specified in the setting
``ACCOUNT_ACTIVATION_DAYS`` (which should be the number of
|
| AdrianGaudebert/socorro-crashstats | vendor-local/lib/python/raven/handlers/logbook.py | Python | mpl-2.0 | 2,760 | 0.003261 |
"""
raven.handlers.logbook
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logbook
import sys
import traceback
from raven.base import Client
from raven.utils.encoding import to_string
class SentryHandler(logbook.Handler):
def __init__(self, *args, **kwargs):
if len(args) == 1:
arg = args[0]
if isinstance(arg, basestring):
self.client = kwargs.pop('client_cls', Client)(dsn=arg)
elif isinstance(arg, Client):
self.client = arg
else:
raise ValueError('The first argument to %s must be either a Client instance or a DSN, got %r instead.' % (
self.__class__.__name__,
arg,
))
args = []
else:
try:
self.client = kwargs.pop('client')
except KeyError:
raise TypeError('Expected keyword argument for SentryHandler: client')
super(SentryHandler, self).__init__(*args, **kwargs)
def emit(self, record):
try:
# Avoid typical config issues by overriding loggers behavior
if record.channel.startswith('sentry.errors'):
print >> sys.stderr, to_string(self.format(record))
return
return self._emit(record)
except Exception:
print >> sys.stderr, "Top level Sentry exception caught - failed creating log record"
print >> sys.stderr, to_string(record.msg)
print >> sys.stderr, to_string(traceback.format_exc())
try:
self.client.captureException()
except Exception:
pass
def _emit(self, record):
data = {
'level': logbook.get_level_name(record.level).lower(),
'logger': record.channel,
'message': self.format(record),
}
event_type = 'raven.events.Message'
handler_kwargs = {'message': record.msg, 'params': record.args}
# If there's no exception being processed, exc_info may be a 3-tuple of None
# http://docs.python.org/library/sys.html#sys.exc_info
if record.exc_info is True or (record.exc_info and all(record.exc_info)):
handler = self.client.get_handler(event_type)
data.update(handler.capture(**handler_kwargs))
event_type = 'raven.events.Exception'
handler_kwargs = {'exc_info': record.exc_info}
return self.client.capture(event_type,
data=data,
extra=record.extra,
**handler_kwargs
)
|
| NoctuaNivalis/qutebrowser | tests/unit/config/test_configexc.py | Python | gpl-3.0 | 3,233 | 0 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.config.configexc."""
import textwrap
import pytest
from qutebrowser.config import configexc
from qutebrowser.utils import usertypes
def test_validation_error():
e = configexc.ValidationError('val', 'msg')
assert e.option is None
assert str(e) == "Invalid value 'val' - msg"
@pytest.mark.parametrize('deleted, renamed, expected', [
(False, None, "No option 'opt'"),
(True, None, "No option 'opt' (this option was removed from qutebrowser)"),
(False, 'new', "No option 'opt' (this option was renamed to 'new')"),
])
def test_no_option_error(deleted, renamed, expected):
e = configexc.NoOptionError('opt', deleted=deleted, renamed=renamed)
assert e.option == 'opt'
assert str(e) == expected
def test_no_option_error_clash():
with pytest.raises(AssertionError):
configexc.NoOptionError('opt', deleted=True, renamed='foo')
def test_backend_error():
e = configexc.BackendError(usertypes.Backend.QtWebKit)
assert str(e) == "This setting is not available with the QtWebKit backend!"
def test_desc_with_text():
"""Test ConfigErrorDesc.with_text."""
old = configexc.ConfigErrorDesc("Error text", Exception("Exception text"))
new = old.with_text("additional text")
assert str(new) == 'Error text (additional text): Exception text'
@pytest.fixture
def errors():
"""Get a ConfigFileErrors object."""
err1 = configexc.ConfigErrorDesc("Error text 1", Exception("Exception 1"))
err2 = configexc.ConfigErrorDesc("Error text 2", Exception("Exception 2"),
"Fake traceback")
return configexc.ConfigFileErrors("config.py", [err1, err2])
def test_config_file_errors_str(errors):
assert str(errors).splitlines() == [
'Errors occurred while reading config.py:',
' Error text 1: Exception 1',
' Error text 2: Exception 2',
]
def test_config_file_errors_html(errors):
html = errors.to_html()
assert textwrap.dedent(html) == textwrap.dedent("""
Errors occurred while reading config.py:
<ul>
<li>
<b>Error text 1</b>: Exception 1
</li>
<li>
<b>Error text 2</b>: Exception 2
<pre>
Fake traceback
</pre>
</li>
</ul>
""")
# Make sure the traceback is not indented
assert '<pre>\nFake traceback\n' in html
|
| CodigoSur/cyclope | cyclope/migrations/0026_auto__chg_field_sitesettings_font_size.py | Python | gpl-3.0 | 13,275 | 0.007684 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'SiteSettings.font_size'
db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
def backwards(self, orm):
# Changing field 'SiteSettings.font_size'
db.alter_column('cyclope_sitesettings', 'font_size', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
models = {
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'ordering': "['name']", 'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'})
},
'cyclope.image': {
'Meta': {'object_name': 'Image'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'})
},
'cyclope.layout': {
'Meta': {'object_name': 'Layout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.menu': {
'Meta': {'object_name': 'Menu'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'})
},
'cyclope.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_entries'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'custom_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'menu_items'", 'to': "orm['cyclope.Menu']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cyclope.MenuItem']"}),
'persistent_layout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site_home': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"})
},
'cyclope.regionview': {
'Meta': {'object_name': 'RegionView'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'region_views'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'content_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layout': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Layout']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['
|
| ischleifer/smile | docs/examples/oddball.py | Python | gpl-3.0 | 8,307 | 0.008908 |
#emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See the COPYING file distributed along with the smile package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
# global imports
import random
import string
# load all the states
from smile import *
from smile.pulse import Pulse
from smile.audio import Beep
# create an experiment
#exp = Experiment()
exp = Experiment(screen_ind=0, resolution=(1024,768), pyglet_vsync=False)
# config vars
DO_PULSE = True
PULSE_ISI = 2.0
PULSE_JITTER = 2.0
# list def
NUM_REPS = 1
NUM_RARE = 10
NUM_COMMON = 40
STIMS = {'visual':['X','O'],
'auditory':['BEEP','BOOP']}
FREQS = {'BOOP':[400,400],
'BEEP':[800,800]}
RESPS = ['F','J']
MODES = STIMS.keys()
CONDS = ['common']*NUM_COMMON + ['rare']*NUM_RARE
# timing
AUDIO_DUR = .5
AUDIO_ISI = 1.5
VISUAL_DUR = 1.0
VISUAL_ISI = 1.0
JITTER = .5
MIN_RT = .100
RESP_DUR = 1.25
# Each stim as rare
# Each response mapped to each stimulus
blocks = []
for mode in MODES:
for reverse_stim in [True, False]:
# pick the proper stim set
stims = STIMS[mode]
# reverse if required
if reverse_stim:
stims = stims[::-1]
# map to common and rare
stim = {'common':stims[0],
'rare':stims[1]}
# loop over response mappings
for reverse_resp in [True, False]:
# pick the responses
resps = RESPS[:]
if reverse_resp:
resps = resps[::-1]
# make the mapping
resp = {'common':resps[0],
'rare':resps[1]}
# shuffle the conds
random.shuffle(CONDS)
# make the block
block = [{'cond':cond,
'modality':mode,
'common_stim':stim['common'],
'rare_stim':stim['rare'],
'common_resp':resp['common'],
'rare_resp':resp['rare'],
'stim':stim[cond],
'correct_resp':resp[cond]}
for cond in CONDS]
# append to blocks
blocks.append(block)
# shuffle the blocks
random.shuffle(blocks)
# do the actual experiment
# start pulsing
if DO_PULSE:
Set('keep_pulsing',True)
with Parallel():
with Loop(conditional=Get('keep_pulsing')):
# send the pulse
pulse=Pulse()
# wait a tiny bit to make sure the end time is registered
Wait(.010, stay_active=True)
# log it all
Log(log_file='pulse.yaml',
pulse_code=pulse['pulse_code'],
pulse_start=pulse['pulse_time'],
pulse_end=pulse['pulse_end_time'])
# Wait the full jitter now
Wait(duration=PULSE_ISI, jitter=PULSE_JITTER)
serial_exp = Serial()
# make the serial parent the active parent
serial_exp.__enter__()
# give instructions
init_inst = """In this experiment we will present blocks of visual and auditory stimuli one stimulus at a time. Your task is to press the key corresponding to the matching stimulus as quickly and accurately as possible when each stimulus is presented. The mappings between stimuli and specific keyboard responses will change for each block.
The visual stimuli will be either an X or an O, while the auditory stimuli will either be a high-frequency Beep or a low-frequency Boop.
We will now review each stimulus prior to beginning the blocks. Press any key to continue.
"""
inst_txt = Text(init_inst, width=600, multiline=True)
KeyPress()
Unshow(inst_txt)
# show each stim
txt = Text("Press any key to see the visual stimuli.")
KeyPress()
Unshow(txt)
with Loop(STIMS['visual']) as stim:
Show(Text(stim.current, font_size=24),
duration=VISUAL_DUR)
Wait(VISUAL_ISI, JITTER)
txt = Text("Press any key to hear the auditory stimuli.")
KeyPress()
Unshow(txt)
with Loop(STIMS['auditory']) as stim:
with Parallel():
Beep(duration=AUDIO_DUR,
freq=Ref(FREQS)[stim.current])
Show(Text(stim.current, font_size=24),
duration=VISUAL_DUR)
Wait(VISUAL_ISI, JITTER)
# give instructions
final_inst = """Note that the words BEEP and BOOP will not be presented during the blocks.
We will now begin the actual experiment. Before each block we will display a screen specifying whether the block will be AUDITORY or VISUAL and what the mapping from the stimuli to the specific keys will be for that block. Please take a moment before beginning the block to learn the new mapping.
Press any key to continue.
"""
inst_txt = Text(final_inst, width=600, multiline=True)
KeyPress()
Unshow(inst_txt)
# loop over blocks
Set('left_stim','')
Set('right_stim','')
Set('stim_time',{'time':0,'error':0})
with Loop(blocks) as block:
# show modality and mapping info
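# Show the rare stimulus on the side whose key is mapped to the rare response
# (RESPS[0] is the left-hand key, RESPS[1] the right-hand key).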
If(block.current[0]['rare_resp']==RESPS[0],
Parallel([Set('left_stim','rare'),Set('right_stim','common')]),
Parallel([Set('left_stim','common'),Set('right_stim','rare')]))
with Parallel():
tm = Text(Ref(string.upper)(block.current[0]['modality'])+' Block',
y=exp['window'].height//2 + 100,
font_size=20)
tl = Text(block.current[0][Get('left_stim')+'_stim'], #+' = '+RESPS[0],
x=exp['window'].width//2 - 75,
anchor_x='right',
font_size=24)
tr = Text(block.current[0][Get('right_stim')+'_stim'], #+' = '+RESPS[1],
x=exp['window'].width//2 + 75,
anchor_x='left',
font_size=24)
tlk = Text('Press '+RESPS[0], x=tl['x']-tl['shown'].content_width//2,
y=tl['y']-25, anchor_y='top')
trk = Text('Press '+RESPS[1], x=tr['x']+tr['shown'].content_width//2,
y=tr['y']-25, anchor_y='top')
tb = Text('Press SPACEBAR to begin the next block.',
y=exp['window'].height//2 - 150,
font_size=18)
# wait for keypress to move on
KeyPress(keys=['SPACE'])
Parallel([Unshow(t) for t in [tm,tl,tr,tb,tlk,trk]])
# show orienting stim
orient = Text('+', font_size=24)
Wait(VISUAL_DUR)
# remove if visual
If(block.current[0]['modality']=='visual',
Unshow(orient))
# pause before trials
Wait(VISUAL_ISI, JITTER)
# loop over trials
with Loop(block.current) as trial:
with Parallel():
# present stim
with If(trial.current['modality']=='visual'):
vstim = Show(Text(trial.current['stim'], font_size=24),
duration=VISUAL_DUR)
with Else():
astim = Beep(duration=AUDIO_DUR,
freq=Ref(FREQS)[trial.current['stim']])
with Serial():
Wait(MIN_RT, stay_active=True)
If(trial.current['modality']=='visual',
Set('stim_time',vstim['show_time']),
Set('stim_time',astim['sound_start']))
kp = KeyPress(keys=RESPS, duration=RESP_DUR,
base_time=Get('stim_time')['time'],
correct_resp=trial.current['correct_resp'])
# log
Log(trial.current,
block=block['i'],
trial=trial['i'],
stim_on=Get('stim_time'),
response=kp['pressed'],
press_time=kp['press_time'],
rt=kp['rt'],
correct=kp['correct'])
# wait jittered isi
If(trial.current['modality']=='visual',
Wait(VISUAL_ISI, JITTER),
Wait(AUDIO_ISI, JITTER))
# remove orienting stim if auditory
If(block.current[0]['modality']=='auditory',
Unshow(orient))
# finish pulsing
if DO_PULSE:
Set('keep_pulsing',False)
serial_exp.__exit__(None, None, None)
# show a thankyou
Wait(
|
| zstackorg/zstack-woodpecker | integrationtest/vm/virtualrouter/lb/test_create_lb.py | Python | apache-2.0 | 2,080 | 0.004808 |
'''
Test load balance.
Test step:
1. Create 2 VM with load balance l3 network service.
2. Create a LB with 2 VMs' nic
3. Check the LB
4. Destroy VMs
@author: Youyk
'''
import os
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_load_balancer \
as zstack_lb_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Create test vm with lb.')
vm1 = test_stub.create_lb_vm()
test_obj_dict.add_vm(vm1)
vm2 = test_stub.create_lb_vm()
test_obj_dict.add_vm(vm2)
#l3_name = os.environ.get('l3VlanNetworkName1')
#vr1 = test_stub.get_vr_by_private_l3_name(l3_name)
#l3_name = os.environ.get('l3NoVlanNetworkName1')
#vr2 = test_stub.get_vr_by_private_l3_name(l3_name)
vm_nic1 = vm1.get_vm().vmNics[0]
vm_nic1_uuid = vm_nic1.uuid
vm_nic2 = vm2.get_vm().vmNics[0]
vm_nic2_uuid = vm_nic2.uuid
pri_l3_uuid = vm_nic1.l3NetworkUuid
vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
l3_uuid = vr_pub_nic.l3NetworkUuid
vip = test_stub.create_vip('vip_for_lb_test', l3_uuid)
test_obj_dict.add_vip(vip)
lb = zstack_lb_header.ZstackTestLoadBalancer()
lb.create('create lb test', vip.get_vip().uuid)
test_obj_dict.add_load_balancer(lb)
lb_creation_option = test_lib.lib_create_lb_listener_option()
lbl = lb.create_listener(lb_creation_option)
lbl.add_nics([vm_nic1_uuid, vm_nic2_uuid])
vm1.check()
vm2.check()
lb.check()
lb.delete()
test_obj_dict.rm_load_balancer(lb)
lb.check()
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Create Load Balancer Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
|
| jonparrott/google-cloud-python | bigquery/tests/unit/test_job.py | Python | apache-2.0 | 189,727 | 0 |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
import mock
from six.moves import http_client
try:
import pandas
except (ImportError, AttributeError): # pragma: NO COVER
pandas = None
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project='test-project', connection=None):
from google.cloud.bigquery.client import Client
if connection is None:
connection = _make_connection()
client = Client(
project=project, credentials=_make_credentials(), _http=object())
client._connection = connection
return client
def _make_connection(*responses):
import google.cloud.bigquery._http
from google.cloud.exceptions import NotFound
mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection)
mock_conn.api_request.side_effect = list(responses) + [NotFound('miss')]
return mock_conn
class Test__error_result_to_exception(unittest.TestCase):
def _call_fut(self, *args, **kwargs):
from google.cloud.bigquery import job
return job._error_result_to_exception(*args, **kwargs)
def test_simple(self):
error_result = {
'reason': 'invalid',
'message': 'bad request'
}
exception = self._call_fut(error_result)
self.assertEqual(exception.code, http_client.BAD_REQUEST)
self.assertTrue(exception.message.startswith('bad request'))
self.assertIn(error_result, exception.errors)
def test_missing_reason(self):
error_result = {}
exception = self._call_fut(error_result)
self.assertEqual(exception.code, http_client.INTERNAL_SERVER_ERROR)
class Test_JobReference(unittest.TestCase):
JOB_ID = 'job-id'
PROJECT = 'test-project-123'
LOCATION = 'us-central'
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._JobReference
def _make_one(self, job_id, project, location):
return self._get_target_class()(job_id, project, location)
def test_ctor(self):
job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
self.assertEqual(job_ref.job_id, self.JOB_ID)
self.assertEqual(job_ref.project, self.PROJECT)
self.assertEqual(job_ref.location, self.LOCATION)
def test__to_api_repr(self):
job_ref = self._make_one(self.JOB_ID, self.PROJECT, self.LOCATION)
self.assertEqual(job_ref._to_api_repr(), {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': self.LOCATION,
})
def test_from_api_repr(self):
api_repr = {
'jobId': self.JOB_ID,
'projectId': self.PROJECT,
'location': self.LOCATION,
}
job_ref = self._get_target_class()._from_api_repr(api_repr)
self.assertEqual(job_ref.job_id, self.JOB_ID)
self.assertEqual(job_ref.project, self.PROJECT)
        self.assertEqual(job_ref.location, self.LOCATION)
class Test_AsyncJob(unittest.TestCase):
JOB_ID = 'job-id'
PROJECT = 'test-project-123'
LOCATION = 'us-central'
@staticmethod
def _get_target_class():
from google.cloud.bigquery import job
return job._AsyncJob
def _make_one(self, job_id, client):
return self._get_target_class()(job_id, client)
def _make_derived_class(self):
class Derived(self._get_target_class()):
_JOB_TYPE = 'derived'
return Derived
def _make_derived(self, job_id, client):
return self._make_derived_class()(job_id, client)
@staticmethod
def _job_reference(job_id, project, location):
from google.cloud.bigquery import job
return job._JobReference(job_id, project, location)
def test_ctor_w_bare_job_id(self):
import threading
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.job_id, self.JOB_ID)
self.assertEqual(job.project, self.PROJECT)
self.assertIsNone(job.location)
self.assertIs(job._client, client)
self.assertEqual(
job._properties,
{
'jobReference': {
'projectId': self.PROJECT,
'jobId': self.JOB_ID,
},
}
)
self.assertIsInstance(job._completion_lock, type(threading.Lock()))
self.assertEqual(
job.path,
'/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID))
def test_ctor_w_job_ref(self):
import threading
other_project = 'other-project-234'
client = _make_client(project=other_project)
job_ref = self._job_reference(self.JOB_ID, self.PROJECT, self.LOCATION)
job = self._make_one(job_ref, client)
self.assertEqual(job.job_id, self.JOB_ID)
self.assertEqual(job.project, self.PROJECT)
self.assertEqual(job.location, self.LOCATION)
self.assertIs(job._client, client)
self.assertEqual(
job._properties,
{
'jobReference': {
'projectId': self.PROJECT,
'location': self.LOCATION,
'jobId': self.JOB_ID,
},
}
)
self.assertFalse(job._result_set)
self.assertIsInstance(job._completion_lock, type(threading.Lock()))
self.assertEqual(
job.path,
'/projects/{}/jobs/{}'.format(self.PROJECT, self.JOB_ID))
def test__require_client_w_none(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIs(job._require_client(None), client)
def test__require_client_w_other(self):
client = _make_client(project=self.PROJECT)
other = object()
job = self._make_one(self.JOB_ID, client)
self.assertIs(job._require_client(other), other)
def test_job_type(self):
client = _make_client(project=self.PROJECT)
derived = self._make_derived(self.JOB_ID, client)
self.assertEqual(derived.job_type, 'derived')
def test_labels_miss(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertEqual(job.labels, {})
def test_labels_update_in_place(self):
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
labels = job.labels
labels['foo'] = 'bar' # update in place
self.assertEqual(job.labels, {'foo': 'bar'})
def test_labels_hit(self):
labels = {
'foo': 'bar',
}
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
job._properties['labels'] = labels
self.assertEqual(job.labels, labels)
def test_etag(self):
etag = 'ETAG-123'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.etag)
job._properties['etag'] = etag
self.assertEqual(job.etag, etag)
def test_self_link(self):
self_link = 'https://api.example.com/123'
client = _make_client(project=self.PROJECT)
job = self._make_one(self.JOB_ID, client)
self.assertIsNone(job.self_link)
job._properties['selfLink'] = self_link
self.assertEqual(job.self_link, self_link)
def test_user_email(self):
user_ema
|
GoogleCloudPlatform/cloud-opensource-python
|
badge_server/test_badge_server.py
|
Python
|
apache-2.0
| 18,516
| 0.0027
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import os
import unittest
from compatibility_lib import fake_compatibility_store
os.environ["RUN_LOCALLY"] = 'true'
# Set the cache to use local cache before importing the main module
import main
class TestBadgeServer(unittest.TestCase):
def setUp(self):
self.mock_checker = mock.Mock(autospec=True)
self.fake_store = fake_compatibility_store.CompatibilityStore()
self.patch_checker = mock.patch(
'main.badge_utils.checker', self.mock_checker)
self.patch_store = mock.patch(
'main.badge_utils.store', self.fake_store)
def test__get_missing_details_missing_inputs(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
TENSORFLOW = 'tensorflow'
TENSORFLOW_RESULT_PY2 = compatibility_store.CompatibilityResult(
packages=[package.Package(TENSORFLOW)],
python_major_version=2,
status=compatibility_store.Status.SUCCESS)
TENSORFLOW_RESULT_PY3 = compatibility_store.CompatibilityResult(
packages=[package.Package(TENSORFLOW)],
python_major_version=3,
status=compatibility_store.Status.SUCCESS)
with self.assertRaises(AssertionError):
package_names = []
results = []
main._get_missing_details(package_names, results)
with self.assertRaises(AssertionError):
package_names = []
results = [TENSORFLOW_RESULT_PY2]
main._get_missing_details(package_names, results)
with self.assertRaises(AssertionError):
package_names = []
results = [TENSORFLOW_RESULT_PY2, TENSORFLOW_RESULT_PY3]
main._get_missing_details(package_names, results)
def test__get_missing_details_too_many_inputs(self):
from compatibility_lib import compatibility_store
with self.assertRaises(AssertionError):
package_names = ['tensorflow', 'opencensus', 'compatibility-lib']
results = []
main._get_missing_details(package_names, results)
def test__get_missing_details_unsupported_packages(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
TENSORFLOW = 'tensorflow'
UNSUPPORTED = 'unsupported'
UNSUPPORTED_RESULT_PY2 = compatibility_store.CompatibilityResult(
packages=[package.Package(UNSUPPORTED)],
python_major_version=2,
status=compatibility_store.Status.UNKNOWN)
PAIR_RESULT_PY3 = compatibility_store.CompatibilityResult(
packages=[package.Package(p) for p in (TENSORFLOW, UNSUPPORTED)],
python_major_version=3,
status=compatibility_store.Status.UNKNOWN)
with self.assertRaises(AssertionError):
package_names = [UNSUPPORTED]
results = [UNSUPPORTED_RESULT_PY2]
main._get_missing_details(package_names, results)
with self.assertRaises(AssertionError):
package_names = [TENSORFLOW, UNSUPPORTED]
results = [PAIR_RESULT_PY3]
main._get_missing_details(package_names, results)
def test__get_missing_details_for_self_compatibility(self):
from compatibility_lib import compatibility_store
from compatibility_lib import configs
from compatibility_lib import package
for package_name in configs.WHITELIST_PKGS:
results = []
            if package_name not in ('tensorflow',):
results.append(compatibility_store.CompatibilityResult(
                    packages=[package.Package(package_name)],
python_major_version=2,
status=compatibility_store.Status.SUCCESS))
if package_name not in ('apache-beam[gcp]', 'gsutil'):
results.append(compatibility_store.CompatibilityResult(
                    packages=[package.Package(package_name)],
python_major_version=3,
status=compatibility_store.Status.SUCCESS))
details = main._get_missing_details([package_name], results)
self.assertEqual(details, None)
def test__get_missing_details_for_pair_compatibility(self):
from compatibility_lib import compatibility_store
from compatibility_lib import configs
from compatibility_lib import package
import itertools
for p1, p2 in itertools.combinations(configs.WHITELIST_PKGS, r=2):
pkgs = [p1, p2]
results = []
            if all([p not in ('tensorflow',) for p in pkgs]):
results.append(compatibility_store.CompatibilityResult(
packages=[package.Package(p) for p in pkgs],
python_major_version=2,
status=compatibility_store.Status.SUCCESS))
if all([p not in ('apache-beam[gcp]', 'gsutil') for p in pkgs]):
results.append(compatibility_store.CompatibilityResult(
packages=[package.Package(p) for p in pkgs],
python_major_version=3,
status=compatibility_store.Status.SUCCESS))
details = main._get_missing_details(pkgs, results)
self.assertEqual(details, None)
def test__get_missing_details_self_fail(self):
from compatibility_lib import compatibility_store
expected = {
'opencensus':
"Missing data for packages=['opencensus'], versions=[2, 3]",
'apache-beam[gcp]':
"Missing data for packages=['apache-beam[gcp]'], versions=[2]",
'tensorflow':
"Missing data for packages=['tensorflow'], versions=[3]",}
for name, expected_details in expected.items():
package_names = [name]
results = []
details = main._get_missing_details(package_names, results)
self.assertEqual(details, expected_details)
def test__get_missing_details_pair_fail(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
package_names = ['opencensus', 'compatibility-lib']
        results = [compatibility_store.CompatibilityResult(
            packages=[package.Package(name) for name in package_names],
python_major_version=2,
status=compatibility_store.Status.SUCCESS)]
details = main._get_missing_details(package_names, results)
expected_details = ("Missing data for packages=['compatibility-lib', "
"'opencensus'], versions=[3]")
self.assertEqual(details, expected_details)
def test__get_self_compatibility_dict(self):
from compatibility_lib import compatibility_store
from compatibility_lib import package
expected = {
'py2': {'status': main.BadgeStatus.SUCCESS, 'details':
'The package does not support this version of python.'},
'py3': {'status': main.BadgeStatus.SUCCESS, 'details': 'NO DETAILS'},
}
PACKAGE = package.Package('tensorflow')
cr_py3 = compatibility_store.CompatibilityResult(
packages=[PACKAGE],
python_major_version=3,
status=compatibility_store.Status.SUCCESS)
self.fake_store._packages_to_compatibility_result[
frozenset([PACKAGE])] = [cr_py3]
with self.patch_checker, self.patch_store:
result_dict = main._get_self_compatibility_dict('tensor
|
robertjacobs/zuros
|
zuros_test/src/timed_out-and-back.py
|
Python
|
mit
| 3,599
| 0.006669
|
#!/usr/bin/env python
""" timed_out_and_back.py - Version 1.1 2013-12-20
A basic demo of the using odometry data to move the robot along
and out-and-back trajectory.
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2012 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from geometry_msgs.msg import Twist
from math import pi
class OutAndBack():
def __init__(self):
# Give the node a name
rospy.init_node('out_and_back', anonymous=False)
# Set rospy to execute a shutdown function when exiting
rospy.on_shutdown(self.shutdown)
# Publisher to control the robot's speed
self.cmd_vel = rospy.Publisher('/cmd_vel', Twist)
# How fast will we update the robot's movement?
rate = 50
# Set the equivalent ROS rate variable
r = rospy.Rate(rate)
# Set the forward linear speed to 0.2 meters per second
linear_speed = 0.2
# Set the travel distance to 1.0 meters
goal_distance = 1.0
# How long should it take us to get there?
linear_duration = goal_distance / linear_speed
# Set the rotation speed to 1.0 radians per second
angular_speed = 1.0
# Set the rotation angle to Pi radians (180 degrees)
goal_angle = pi
# How long should it take to rotate?
angular_duration = goal_angle / angular_speed
# Loop through the two legs of the trip
for i in range(2):
# Initialize the movement command
move_cmd = Twist()
# Set the forward speed
move_cmd.linear.x = linear_speed
# Move forward for a time to go the desired distance
ticks = int(linear_duration * rate)
for t in range(ticks):
self.cmd_vel.publish(move_cmd)
r.sleep()
# Stop the robot before the rotation
move_cmd = Twist()
self.cmd_vel.publish(move_cmd)
rospy.sleep(1)
# Now rotate left roughly 180 degrees
# Set the angular speed
move_cmd.angular.z = angular_speed
# Rotate for a time to go 180 degrees
ticks = int(goal_angle * rate)
for t in range(ticks):
self.cmd_vel.publish(move_cmd)
r.sleep()
# Stop the robot before the next leg
move_cmd = Twist()
self.cmd_vel.publish(move_cmd)
rospy.sleep(1)
# Stop the robot
self.cmd_vel.publish(Twist())
def shutdown(self):
        # Always stop the robot when shutting down the node.
rospy.loginfo("Stopping the robot...")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
OutAndBack()
except:
rospy.loginfo("Out-and-Back node terminated.")
| |
dbarbier/privot
|
python/test/t_NatafIndependentCopulaEvaluation_std.py
|
Python
|
lgpl-3.0
| 685
| 0.007299
|
#! /usr/bin/env python
from openturns import *
from math import *
TESTPREAMBLE()
RandomGenerator().SetSeed(0)
try :
dim = 2
    transformation = NatafIndependentCopulaEvaluation(dim)
print "transformation=", repr(transformation)
point = NumericalPoint(dim, 0.75)
print "transformation(", point, ")=", repr(transformation(point))
print "transformation parameters gradient=", repr(transformation.parametersGradient(point))
print "input dimension=", transformation.getInputDimension()
    print "output dimension=", transformation.getOutputDimension()
except :
import sys
print "t_NatafIndependentCopulaEvaluation_std.py", sys.exc_type, sys.exc_value
|
statwonk/thinkbayes
|
monty.py
|
Python
|
mit
| 596
| 0.008389
|
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import imp
tb = imp.load_source('thinkbayes', '/home/chris/installations/python/ThinkBayes/thinkbayes.py')
from thinkbayes import Pmf
from thinkbayes import Suite
class Monty(Suite):
def Likelihood(self, data, hypo):
if hypo == data:
return 0
elif hypo == 'A':
return 0.5
else:
return 1
suite = Monty('ABC')
suite.Update('A')
suite.Print()
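# A hedged reading of the update above (not output captured from the original file):
# with data 'A', Likelihood() zeroes hypothesis 'A' and leaves 'B' and 'C' at 1, so after
# normalization suite.Print() should show approximately A 0.0, B 0.5, C 0.5.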
|
hiroki8080/Kokemomo
|
kokemomo/plugins/engine/controller/km_engine.py
|
Python
|
mit
| 1,820
| 0.002747
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .km_backend.km_plugin_manager import KMBaseController
from .km_exception import log
__author__ = 'hiroki'
class KMEngine(KMBaseController):
def get_name(self):
return 'engine'
def get_route_list(self):
list = (
{'rule': '/engine-js/<filename>', 'method': 'GET', 'target': self.engine_js_static, 'name': 'engine_static_js'},
{'rule': '/engine-css/<filename>', 'method': 'GET', 'target': self.engine_css_static, 'name': 'engine_static_css'},
{'rule': '/engine-img/<filename>', 'method': 'GET', 'target': self.engine_img_static, 'name': 'engine_static_img'},
{'rule': '/error', 'method': 'GET', 'target': self.engine_error},
)
return list
@log
def engine_js_static(self, filename):
"""
set javascript files.
:param filename: javascript file name.
:return: static path.
"""
file_path = 'kokemomo/plugins/engine/view/resource/js'
return self.load_static_file(filename, root=file_path)
@log
    def engine_css_static(self, filename):
"""
set css files.
:param filename: css file name.
:return: static path.
"""
file_path = 'kokemomo/plugins/engine/view/resource/css'
return self.load_static_file(filename, root=file_path)
@log
def engine_img_static(self, filename):
"""
set image files.
:param filename: image file name.
:return: static path.
"""
file_path = 'kokemomo/plugins/engine/view/resource/img'
return self.load_static_file(filename, root=file_path)
def engine_error(self):
return "An error has occurred." \
" Please contact the server administrator."
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.5.0/Lib/asyncio/unix_events.py
|
Python
|
mit
| 34,324
| 0.000146
|
"""Selector event loop for Unix with signal handling."""
import errno
import os
import signal
import socket
import stat
import subprocess
import sys
import threading
import warnings
from . import base_events
from . import base_subprocess
from . import compat
from . import constants
from . import coroutines
from . import events
from . import futures
from . import selector_events
from . import selectors
from . import transports
from .coroutines import coroutine
from .log import logger
__all__ = ['SelectorEventLoop',
'AbstractChildWatcher', 'SafeChildWatcher',
'FastChildWatcher', 'DefaultEventLoopPolicy',
]
if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
def _sighandler_noop(signum, frame):
"""Dummy signal handler."""
pass
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Unix event loop.
Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
"""
def __init__(self, selector=None):
super().__init__(selector)
self._signal_handlers = {}
def _socketpair(self):
return socket.socketpair()
def close(self):
super().close()
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
def _process_self_data(self, data):
for signum in data:
if not signum:
# ignore null bytes written by _write_to_self()
continue
self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if (coroutines.iscoroutine(callback)
or coroutines.iscoroutinefunction(callback)):
raise TypeError("coroutines cannot be used "
"with add_signal_handler()")
        self._check_signal(sig)
self._check_closed()
try:
# set_wakeup_fd() raises ValueError if this is not the
            # main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except (ValueError, OSError) as exc:
raise RuntimeError(str(exc))
handle = events.Handle(callback, args, self)
self._signal_handlers[sig] = handle
try:
# Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
except OSError as exc:
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle)
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not.
"""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as exc:
logger.info('set_wakeup_fd(-1) failed: %s', exc)
return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
if not (1 <= sig < signal.NSIG):
raise ValueError(
'sig {} out of range(1, {})'.format(sig, signal.NSIG))
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
with events.get_child_watcher() as watcher:
waiter = futures.Future(loop=self)
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
**kwargs)
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
try:
yield from waiter
except Exception as exc:
# Workaround CPython bug #23353: using yield/yield-from in an
# except block of a generator doesn't clear properly
# sys.exc_info()
err = exc
else:
err = None
if err is not None:
transp.close()
yield from transp._wait()
raise err
return transp
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
@coroutine
def create_unix_connection(self, protocol_factory, path, *,
ssl=None, sock=None,
server_hostname=None):
assert server_hostname is None or isinstance(server_hostname, str)
if ssl:
if server_hostname is None:
raise ValueError(
'you have to pass server_hostname when using ssl')
else:
if server_hostname is not None:
raise ValueError('server_hostname is only meaningful with ssl')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
yield from self.sock_connect(sock, path)
except:
sock.close()
raise
else:
if sock is None:
raise ValueError('no path and sock were specified')
sock.setblocking(False)
transport, protocol = yield from self.
|
Sticklyman1936/workload-automation
|
wlauto/modules/__init__.py
|
Python
|
apache-2.0
| 585
| 0
|
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
sittizen/django_dropimages
|
django_dropimages/models.py
|
Python
|
mit
| 859
| 0.002328
|
from django.conf import settings
from django.db import models
from django_dropimages import settings as di_settings
# if no custom image models is present I load my own
if not di_settings.CONFIG['DROPIMAGEGALLERY_MODEL']:
class DropimagesGallery(models.Model):
gallery_identifier = models.CharField(max_length=36)
creation_timestamp = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
# if no custom image models is present I load my own
if not di_settings.CONFIG['DROPIMAGE_MODEL']:
class DropimagesImage(models.Model):
dropimages_gallery = models.ForeignKey('django_dropimages.DropimagesGallery', related_name='images')
dropimages_original_filename = models.CharField(max_length=256)
image = models.ImageField(upload_to='%y/%m/%d')
|
tyll/bodhi
|
bodhi/tests/server/services/test_updates.py
|
Python
|
gpl-2.0
| 193,782
| 0.001099
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module contains tests for bodhi.server.services.updates."""
from datetime import datetime, timedelta
import copy
import textwrap
import time
import urlparse
from mock import ANY
from webtest import TestApp
import mock
from bodhi.server import main
from bodhi.server.config import config
from bodhi.server.models import (
BuildrootOverride, Group, RpmPackage, ModulePackage, Release,
ReleaseState, RpmBuild, Update, UpdateRequest, UpdateStatus, UpdateType,
UpdateSeverity, User, TestGatingStatus)
from bodhi.tests.server import base
YEAR = time.localtime().tm_year
mock_valid_requirements = {
'target': 'bodhi.server.validators._get_valid_requirements',
'return_value': ['rpmlint', 'upgradepath'],
}
mock_uuid4_version1 = {
'target': 'uuid.uuid4',
'return_value': 'this is a consistent string',
}
mock_uuid4_version2 = {
'target': 'uuid.uuid4',
'return_value': 'this is another consistent string',
}
mock_taskotron_results = {
'target': 'bodhi.server.util.taskotron_results',
'return_value': [{
"outcome": "PASSED",
"data": {},
"testcase": {"name": "rpmlint"}
}],
}
mock_failed_taskotron_results = {
'target': 'bodhi.server.util.taskotron_results',
'return_value': [{
"outcome": "FAILED",
"data": {},
"testcase": {"name": "rpmlint"}
}],
}
mock_absent_taskotron_results = {
    'target': 'bodhi.server.util.taskotron_results',
'return_value': [],
}
class TestNewUpdate(base.BaseTestCase):
"""
This class contains tests for the new_update() function.
"""
@mock.patch(**mock_valid_requirements)
def test_invalid_build_name(self, *args):
        res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17,invalidbuild-1.0'),
status=400)
assert 'Build not in name-version-release format' in res, res
@mock.patch(**mock_valid_requirements)
def test_empty_build_name(self, *args):
res = self.app.post_json('/updates/', self.get_update([u'']), status=400)
self.assertEquals(res.json_body['errors'][0]['name'], 'builds.0')
self.assertEquals(res.json_body['errors'][0]['description'], 'Required')
@mock.patch(**mock_valid_requirements)
def test_fail_on_edit_with_empty_build_list(self, *args):
update = self.get_update()
update['edited'] = update['builds'] # the update title..
update['builds'] = []
res = self.app.post_json('/updates/', update, status=400)
self.assertEquals(len(res.json_body['errors']), 2)
self.assertEquals(res.json_body['errors'][0]['name'], 'builds')
self.assertEquals(
res.json_body['errors'][0]['description'],
'You may not specify an empty list of builds.')
self.assertEquals(res.json_body['errors'][1]['name'], 'builds')
self.assertEquals(
res.json_body['errors'][1]['description'],
'ACL validation mechanism was unable to determine ACLs.')
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_unicode_description(self, publish, *args):
update = self.get_update('bodhi-2.0.0-2.fc17')
update['notes'] = u'This is wünderfül'
r = self.app.post_json('/updates/', update)
up = r.json_body
self.assertEquals(up['title'], u'bodhi-2.0.0-2.fc17')
self.assertEquals(up['notes'], u'This is wünderfül')
self.assertIsNotNone(up['date_submitted'])
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
def test_duplicate_build(self, *args):
res = self.app.post_json(
'/updates/', self.get_update([u'bodhi-2.0-2.fc17', u'bodhi-2.0-2.fc17']), status=400)
assert 'Duplicate builds' in res, res
@mock.patch(**mock_valid_requirements)
def test_multiple_builds_of_same_package(self, *args):
res = self.app.post_json('/updates/', self.get_update([u'bodhi-2.0-2.fc17',
u'bodhi-2.0-3.fc17']),
status=400)
assert 'Multiple bodhi builds specified' in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_autokarma(self, *args):
res = self.app.post_json('/updates/', self.get_update(stable_karma=-1),
status=400)
assert '-1 is less than minimum value 1' in res, res
res = self.app.post_json('/updates/', self.get_update(unstable_karma=1),
status=400)
assert '1 is greater than maximum value -1' in res, res
@mock.patch(**mock_valid_requirements)
def test_duplicate_update(self, *args):
res = self.app.post_json('/updates/', self.get_update(u'bodhi-2.0-1.fc17'),
status=400)
assert 'Update for bodhi-2.0-1.fc17 already exists' in res, res
@mock.patch(**mock_valid_requirements)
def test_invalid_requirements(self, *args):
update = self.get_update()
update['requirements'] = 'rpmlint silly-dilly'
res = self.app.post_json('/updates/', update, status=400)
assert "Required check doesn't exist" in res, res
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_no_privs(self, publish, *args):
user = User(name=u'bodhi')
self.db.add(user)
self.db.commit()
app = TestApp(main({}, testing=u'bodhi', session=self.db, **self.app_settings))
res = app.post_json('/updates/', self.get_update(u'bodhi-2.1-1.fc17'),
status=400)
expected_error = {
"location": "body",
"name": "builds",
"description": ("bodhi is not a member of \"packager\", which is a"
" mandatory packager group")
}
assert expected_error in res.json_body['errors'], \
res.json_body['errors']
self.assertEquals(publish.call_args_list, [])
@mock.patch(**mock_taskotron_results)
@mock.patch(**mock_valid_requirements)
@mock.patch('bodhi.server.notifications.publish')
def test_provenpackager_privs(self, publish, *args):
"Ensure provenpackagers can push updates for any package"
user = User(name=u'bodhi')
self.db.add(user)
self.db.commit()
group = self.db.query(Group).filter_by(name=u'provenpackager').one()
user.groups.append(group)
app = TestApp(main({}, testing=u'bodhi', session=self.db, **self.app_settings))
update = self.get_update(u'bodhi-2.1-1.fc17')
update['csrf_token'] = app.get('/csrf').json_body['csrf_token']
res = app.post_json('/updates/', update)
assert 'bodhi does not have commit access to bodhi' not in res, res
build = self.db.query(RpmBuild).filter_by(nvr=u'bodhi-2.1-1.fc17').one()
assert build.update is not None
publish.assert_called_once_with(
topic='update.request.testing', msg=mock.ANY)
@mock.patch(**mock_valid_requirements)
def test_pkgdb_outage(self, *args):
"Test the case where our call to the pkgdb throws an exception"
settings = self.app_settings.copy()
settings['acl_system']
|
0xc0170/project_generator
|
tests/test_tools/test_uvision5.py
|
Python
|
apache-2.0
| 3,726
| 0.00161
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import shutil
from unittest import TestCase
from nose.tools import *
from project_generator.generate import Generator
from project_generator.project import Project
from project_generator.settings import ProjectSettings
from project_generator.tools.uvision import uVisionDefinitions, Uvision5
from .simple_project import project_1_yaml, project_2_yaml, projects_1_yaml
class TestProject(TestCase):
"""test things related to the uvision tool"""
def setUp(self):
if not os.path.exists('test_workspace'):
os.makedirs('test_workspace')
# write project file
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
f.write(yaml.dump(project_2_yaml, default_flow_style=False))
# write projects file
with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
f.write(yaml.dump(projects_1_yaml, default_flow_style=False))
self.project = next(Generator(projects_1_yaml).generate('project_1'))
self.project2 = next(Generator(projects_1_yaml).generate('project_2'))
self.defintions = uVisionDefinitions()
self.uvision = Uvision5(self.project.project, ProjectSettings())
def tearDown(self):
# remove created directory
shutil.rmtree('test_workspace', ignore_errors=True)
shutil.rmtree('generated_projects', ignore_errors=True)
    # this is commented out for now: the project needs to be adjusted before exporting,
    # so this test fails. Keeping it for a while as a reminder.
# def test_export(self):
# self.uvision.export_project()
def test_export_project(self):
result = self.project.generate('uvision5', False)
# it should get generated files from the last export
projectfiles = self.project.get_generated_project_files('uvision5')
assert result == 0
assert projectfiles
assert os.path.splitext(projectfiles['files'][0])[1] == '.uvprojx'
def test_export_project_to_diff_directory(self):
project_1_yaml['common']['export_dir'] = ['create_this_folder']
with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        for project in Generator(projects_1_yaml).generate('project_1'):
result = project.generate('uvision5', False)
            assert result == 0
assert os.path.isdir('create_this_folder')
shutil.rmtree('create_this_folder')
def test_build_project(self):
result_export = self.project.generate('uvision5', False)
result_build = self.project.build('uvision5')
assert result_export == 0
# nonvalid project, should fail with errors
assert result_build == -1
def test_template(self):
# should fail as template does not exists
result = self.project2.generate('uvision5', False)
assert result == 0
|
exord/bayev
|
pastislib.py
|
Python
|
mit
| 7,786
| 0
|
"""
Module containing useful functions to link PASTIS MCMC posterior samples with
the bayev package.
"""
import os
import pickle
import importlib
import numpy as np
import PASTIS_NM
import PASTIS_NM.MCMC as MCMC
from PASTIS_NM import resultpath, configpath
def read_pastis_file(target, simul, pastisfile=None):
"""Read configuration dictionary."""
if pastisfile is None:
# Get input_dict
configname = os.path.join(configpath, target,
target + '_' + simul + '.pastis')
else:
configname = pastisfile
try:
f = open(configname)
except IOError:
raise IOError('Configuration file {} not found!'.format(configname))
dd = pickle.load(f)
f.close()
return dd
def get_datadict(target, simul, pastisfile=None):
config_dicts = read_pastis_file(target, simul, pastisfile)
return PASTIS_NM.readdata(config_dicts[2])[0]
def get_priordict(target, simul, pastisfile=None):
config_dicts = read_pastis_file(target, simul, pastisfile)
return MCMC.priors.prior_constructor(config_dicts[1], {})
def get_posterior_samples(target, simul, mergefile=None,
suffix='_Beta1.000000_mergedchain.dat'):
if mergefile is None:
mergepath = os.path.join(resultpath, target,
target + '_' + simul + suffix)
else:
mergepath = mergefile
f = open(mergepath, 'r')
vdm = pickle.load(f)
f.close()
return vdm
def pastis_init(target, simul, posteriorfile=None, datadict=None,
pastisfile=None):
# Read configuration dictionaries.
configdicts = read_pastis_file(target, simul, pastisfile)
infodict, input_dict = configdicts[0], configdicts[1].copy()
# Read data dictionary.
if datadict is None:
datadict = get_datadict(target, simul, pastisfile=pastisfile)
# Obtain PASTIS version the merged chain was constructed with.
vdm = get_posterior_samples(target, simul, mergefile=posteriorfile)
modulename = vdm.__module__.split('.')[0]
# Import the correct PASTIS version used to construct a given posterior
# sample
pastis = importlib.import_module(modulename)
    # To deal with potential drifts, we need to initialize to fix TrefRV.
pastis.initialize(infodict, datadict, input_dict)
# import PASTIS_rhk.MCMC as MCMC
# MCMC.PASTIS_MCMC.get_likelihood
importlib.import_module('.MCMC.PASTIS_MCMC', package=pastis.__name__)
importlib.import_module('.AstroClasses', package=pastis.__name__)
importlib.import_module('.ObjectBuilder', package=pastis.__name__)
importlib.import_module('.models.RV', package=pastis.__name__)
importlib.reload(pastis.AstroClasses)
importlib.reload(pastis.ObjectBuilder)
importlib.reload(pastis.models.RV)
importlib.reload(pastis.MCMC.PASTIS_MCMC)
return
def pastis_loglike(samples, params, target, simul, posteriorfile=None,
datadict=None, pastisfile=None):
"""
A wrapper to run the PASTIS.MCMC.get_likelihood function.
Computes the loglikelihood on a series of points given in samples using
PASTIS.MCMC.get_likelihood.
:param np.array samples: parameter samples on which to compute log
likelihood. Array dimensions must be (n x k), where *n* is the number of
samples and *k* is the number of model parameters.
:param list params: parameter names. Must be in the PASTIS format: \
objectname_parametername.
:return:
"""
# Read configuration dictionaries.
configdicts = read_pastis_file(target, simul, pastisfile)
input_dict = configdicts[1].copy()
# Read data dictionary.
if datadict is None:
datadict = get_datadict(target, simul, pastisfile=pastisfile)
# Obtain PASTIS version the merged chain was constructed with.
vdm = get_posterior_samples(target, simul, mergefile=posteriorfile)
modulename = vdm.__module__.split('.')[0]
# Import the correct PASTIS version used to construct a given posterior
# sample
pastis = importlib.import_module(modulename)
"""
# To deal with potential drifts, we need initialize to fix TrefRV.
pastis.initialize(infodict, datadict, input_dict)
# import PASTIS_rhk.MCMC as MCMC
# MCMC.PASTIS_MCMC.get_likelihood
importlib.import_module('.MCMC.PASTIS_MCMC', package=pastis.__name__)
importlib.import_module('.AstroClasses', package=pastis.__name__)
importlib.import_module('.ObjectBuilder', package=pastis.__name__)
importlib.import_module('.models.RV', package=pastis.__name__)
reload(pastis.AstroClasses)
reload(pastis.ObjectBuilder)
reload(pastis.models.RV)
reload(pastis.MCMC.PASTIS_MCMC)
"""
# Prepare output arrays
loglike = np.zeros(samples.shape[0])
for s in range(samples.shape[0]):
for parameter_index, full_param_name in enumerate(params):
# Modify input_dict
obj_name, param_name = full_param_name.split('_')
input_dict[obj_name][param_name][0] = samples[s, parameter_index]
# Construct chain state
chain_state, labeldict = \
pastis.MCMC.tools.state_constructor(input_dict)
try:
# Compute likelihood for this state
ll, loglike[s], likeout = \
pastis.MCMC.PASTIS_MCMC.get_likelihood(chain_state,
input_dict,
datadict, labeldict,
False,
False)
except (ValueError, RuntimeError, pastis.EvolTrackError,
pastis.EBOPparamError):
            print('Error in likelihood computation, setting lnlike to -np.inf')
loglike[s] = -np.inf
pass
return loglike
def pastis_logprior(samples, params, target, simul, posteriorfile=None,
pastisfile=None):
"""
A wrapper to run the PASTIS.MCMC.get_likelihood function.
Computes the loglikelihood on a series of points given in samples using
PASTIS.MCMC.get_likelihood.
:param np.array samples: parameter samples on which to compute log
likelihood. Array dimensions must be (n x k), where *n* is the number of
samples and *k* is the number of model parameters.
:param list params: parameter names.
:return:
"""
# Read configuration dictionaries.
configdicts = read_pastis_file(target, simul, pastisfile)
input_dict = configdicts[1].copy()
priordict = get_priordict(target, simul, pastisfile=pastisfile)
# Obtain PASTIS version the merged chain was constructed with.
vdm = get_posterior_samples(target, simul, mergefile=posteriorfile)
modulename = vdm.__module__.split('.')[0]
# Import the correct PASTIS version used to construct a given posterior
# sample
pastis = importlib.import_module(modulename)
importlib.import_module('.MCMC.PASTIS_MCMC', package=pastis.__name__)
# Prepare output arrays
logprior = np.zeros(samples.shape[0])
for s in range(samples.shape[0]):
for parameter_index, full_param_name in enumerate(params):
# Modify input_dict
obj_name, param_name = full_param_name.split('_')
input_dict[obj_name][param_name][0] = samples[s, parameter_index]
# Construct chain state
chain_state, labeldict = \
pastis.MCMC.tools.state_constructor(input_dict)
# Compute prior distribution for this state
prior_probability = pastis.MCMC.priors.compute_priors(
priordict, labeldict)[0]
logprior[s] = np.log(prior_probability)
return logprior
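# A minimal usage sketch of the two wrappers above. The target/simulation names and
# parameter labels below are hypothetical and not taken from any real PASTIS run:
# samples = np.random.rand(100, 2)            # 100 samples of 2 model parameters
# params = ['star1_teff', 'planet1_P']        # PASTIS-style objectname_parametername labels
# ll = pastis_loglike(samples, params, 'mytarget', 'mysimul')
# lp = pastis_logprior(samples, params, 'mytarget', 'mysimul')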
|
blowmage/gcloud-python
|
regression/pubsub.py
|
Python
|
apache-2.0
| 4,859
| 0
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest2
from gcloud import _helpers
from gcloud import pubsub
from gcloud.pubsub.subscription import Subscription
from gcloud.pubsub.topic import Topic
_helpers._PROJECT_ENV_VAR_NAME = 'GCLOUD_TESTS_PROJECT_ID'
pubsub.set_defaults()
class TestPubsub(unittest2.TestCase):
def setUp(self):
self.to_delete = []
def tearDown(self):
for doomed in self.to_delete:
doomed.delete()
def test_create_topic(self):
TOPIC_NAME = 'a-new-topic'
topic = Topic(TOPIC_NAME)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
self.assertTrue(topic.exists())
self.assertEqual(topic.name, TOPIC_NAME)
def test_list_topics(self):
topics_to_create = [
'new%d' % (1000 * time.time(),),
'newer%d' % (1000 * time.time(),),
'newest%d' % (1000 * time.time(),),
]
for topic_name in topics_to_create:
topic = Topic(topic_name)
topic.create()
self.to_delete.append(topic)
# Retrieve the topics.
all_topics, _ = pubsub.list_topics()
project = pubsub.get_default_project()
created = [topic for topic in all_topics
if topic.name in topics_to_create and
topic.project == project]
self.assertEqual(len(created), len(topics_to_create))
def test_create_subscription(self):
TOPIC_NAME = 'subscribe-me'
topic = Topic(TOPIC_NAME)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
SUBSCRIPTION_NAME = 'subscribing-now'
subscription = Subscription(SUBSCRIPTION_NAME, topic)
self.assertFalse(subscription.exists())
subscription.create()
self.to_delete.append(subscription)
self.assertTrue(subscription.exists())
self.assertEqual(subscription.name, SUBSCRIPTION_NAME)
self.assertTrue(subscription.topic is topic)
def test_list_subscriptions(self):
TOPIC_NAME = 'subscribe-me'
topic = Topic(TOPIC_NAME)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
subscriptions_to_create = [
'new%d' % (1000 * time.time(),),
            'newer%d' % (1000 * time.time(),),
'newest%d' % (1000 * time.time(),),
]
for subscription_name in subscriptions_to_create:
subscription = Subscription(subscription_name, topic)
subscription.create()
self.to_delete.append(subscription)
# Retrieve the subscriptions.
        all_subscriptions, _ = pubsub.list_subscriptions()
created = [subscription for subscription in all_subscriptions
if subscription.name in subscriptions_to_create and
subscription.topic.name == TOPIC_NAME]
self.assertEqual(len(created), len(subscriptions_to_create))
def test_message_pull_mode_e2e(self):
TOPIC_NAME = 'subscribe-me'
topic = Topic(TOPIC_NAME, timestamp_messages=True)
self.assertFalse(topic.exists())
topic.create()
self.to_delete.append(topic)
SUBSCRIPTION_NAME = 'subscribing-now'
subscription = Subscription(SUBSCRIPTION_NAME, topic)
self.assertFalse(subscription.exists())
subscription.create()
self.to_delete.append(subscription)
MESSAGE_1 = b'MESSAGE ONE'
MESSAGE_2 = b'MESSAGE ONE'
EXTRA_1 = 'EXTRA 1'
EXTRA_2 = 'EXTRA 2'
topic.publish(MESSAGE_1, extra=EXTRA_1)
topic.publish(MESSAGE_2, extra=EXTRA_2)
received = subscription.pull(max_messages=2)
ack_ids = [recv[0] for recv in received]
subscription.acknowledge(ack_ids)
messages = [recv[1] for recv in received]
def _by_timestamp(message):
return message.timestamp
message1, message2 = sorted(messages, key=_by_timestamp)
self.assertEqual(message1.data, MESSAGE_1)
self.assertEqual(message1.attributes['extra'], EXTRA_1)
self.assertEqual(message2.data, MESSAGE_2)
self.assertEqual(message2.attributes['extra'], EXTRA_2)
|
cnwalter/screenly-ose
|
lib/assets_helper.py
|
Python
|
gpl-2.0
| 4,416
| 0.001359
|
import db
import queries
import datetime
FIELDS = ["asset_id", "name", "uri", "start_date",
"end_date", "duration", "mimetype", "is_enabled", "nocache", "play_order"]
create_assets_table = 'CREATE TABLE assets(asset_id text primary key, name text, uri text, md5 text, start_date timestamp, end_date timestamp, duration text, mimetype text, is_enabled integer default 0, nocache integer default 0, play_order integer default 0)'
# Note all times are naive for legacy reasons but always UTC.
get_time = datetime.datetime.utcnow
def is_active(asset, at_time=None):
"""Accepts an asset dictionary and determines if it
is active at the given time. If no time is specified, 'now' is used.
    >>> asset = {'asset_id': u'4c8dbce552edb5812d3a866cfe5f159d', 'mimetype': u'web', 'name': u'WireLoad', 'end_date': datetime.datetime(2013, 1, 19, 23, 59), 'uri': u'http://www.wireload.net', 'duration': u'5', 'is_enabled': True, 'nocache': 0, 'play_order': 1, 'start_date': datetime.datetime(2013, 1, 16, 0, 0)};
>>> is_active(asset, datetime.datetime(2013, 1, 16, 12, 00))
True
>>> is_active(asset, datetime.datetime(2014, 1, 1))
False
>>> asset['is_enabled'] = False
>>> is_active(asset, datetime.datetime(2013, 1, 16, 12, 00))
False
"""
if asset['is_enabled'] and asset['start_date'] and asset['end_date']:
at = at_time or get_time()
return asset['start_date'] < at < asset['end_date']
return False
def get_playlist(conn):
"""Returns all currently active assets."""
return filter(is_active, read(conn))
def mkdict(keys):
"""Returns a function that creates a dict from a database record."""
return lambda row: dict([(keys[ki], v) for ki, v in enumerate(row)])
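# A tiny usage sketch of mkdict (the values below are made up for illustration):
# mk = mkdict(['asset_id', 'name'])
# mk(('a1b2c3', 'WireLoad'))  # -> {'asset_id': 'a1b2c3', 'name': 'WireLoad'}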
def create(conn, asset):
"""
Create a database record for an asset.
Returns the asset.
Asset's is_active field is updated before returning.
"""
if 'is_active' in asset:
asset.pop('is_active')
with db.commit(conn) as c:
c.execute(queries.create(asset.keys()), asset.values())
asset.update({'is_active': is_active(asset)})
return asset
def create_multiple(conn, assets):
"""
Create a database record for each asset.
Returns asset list.
Asset's is_active field is updated before returning.
"""
with db.commit(conn) as c:
for asset in assets:
if 'is_active' in asset:
asset.pop('is_active')
c.execute(queries.create(asset.keys()), asset.values())
asset.update({'is_active': is_active(asset)})
return assets
def read(conn, asset_id=None, keys=FIELDS):
"""
Fetch one or more assets from the database.
Returns a list of dicts or one dict.
Assets' is_active field is updated before returning.
"""
assets = []
mk = mkdict(keys)
with db.cursor(conn) as c:
if asset_id is None:
c.execute(queries.read_all(keys))
else:
c.execute(queries.read(keys), [asset_id])
assets = [mk(asset) for asset in c.fetchall()]
[asset.update({'is_active': is_active(asset)}) for asset in assets]
if asset_id and len(assets):
return assets[0]
return assets
def update(conn, asset_id, asset):
"""
Update an asset in the database.
Returns the asset.
Asset's asset_id and is_active field is updated before returning.
"""
del asset['asset_id']
if 'is_active' in asset:
asset.pop('is_active')
with db.commit(conn) as c:
c.execute(queries.update(asset.keys()), asset.values() + [asset_id])
asset.update({'asset_id': asset_id})
if 'start_date' in asset:
asset.update({'is_active': is_active(asset)})
return asset
def delete(conn, asset_id):
"""Remove an asset from the database."""
with db.commit(conn) as c:
c.execute(queries.remove, [asset_id])
def save_ordering(db_conn, ids):
    """Order assets. Assets whose ids are not in the given list are moved to the last position."""
assets = read(db_conn)
for play_order, asset_id in enumerate(ids):
update(db_conn, asset_id, {'asset_id': asset_id, 'play_order': play_order})
# Set the play order to a high value for all inactive assets.
for asset in assets:
if asset['asset_id'] not in ids:
update(db_conn, asset['asset_id'], {'asset_id': asset['asset_id'], 'play_order': len(ids)})
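# A minimal usage sketch of the helpers above. How `conn` is obtained is assumed here
# (e.g. an sqlite3 connection on which create_assets_table has already been executed):
# asset = {'asset_id': 'abc123', 'name': 'Example', 'uri': 'http://example.com',
#          'start_date': get_time(), 'end_date': get_time() + datetime.timedelta(days=1),
#          'duration': u'5', 'mimetype': u'web', 'is_enabled': 1, 'nocache': 0, 'play_order': 0}
# create(conn, asset)
# get_playlist(conn)  # -> [asset] while 'now' falls between start_date and end_date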
|
oblique-labs/pyVM
|
rpython/jit/backend/ppc/test/test_dict.py
|
Python
|
mit
| 258
| 0
|
from rpython.jit.backend.ppc.test.support import JitPPCMixin
from rpython.jit.metainterp.test.test_dict import DictTests
class TestDict(JitPPCMixin, DictTests):
# for the individual tests see
# ====> ../../../metainterp/test/test_dict.py
pass
|
hellhovnd/dentexchange
|
dentexchange/apps/employer/tests/test_job_posting.py
|
Python
|
bsd-3-clause
| 398
| 0
|
# -*- coding:utf-8 -*-
import unittest
import mock
from ..models import JobPosting
class JobPostingTestCase(unittest.TestCase):
def test_unicode_should_return_position_name(self):
# setup
model = JobPosting()
model.position_name = 'Position Name'
# action
email = unicode(model)
# assert
        self.assertEqual(model.position_name, email)
|
Jianlong-Peng/rp
|
python/evaluate_smartcyp_v2.py
|
Python
|
gpl-2.0
| 5,047
| 0.015257
|
'''
#=============================================================================
# FileName: evaluate_smartcyp_v2.py
# Desc:
# Author: jlpeng
# Email: jlpeng1201@gmail.com
# HomePage:
# Version: 0.0.1
# Created: 2015-03-09 20:38:49
# LastChange: 2015-03-10 17:58:21
# History:
#=============================================================================
'''
import sys
mol_names = []
def main(argv=sys.argv):
if len(argv) != 6:
print "\n Usage: %s k des_file som_file predict.csv sdf_file"%argv[0]
print " k : report top-1 to top-k results"
print " des_file : descriptor, same as input for gap_predict"
print " som_file : file of actual SOMs"
print " each line should be `name\\tatom1\\tatom2...`"
print " predict.csv: file generated by smartcyp"
print " sdf_file : the one used to generate `predict.csv`"
print "\nAttention"
print " 1. reports are based on SOMs with only one atom"
print " - considering all types of SOMs"
print " - exclude SOM type `6`(O-conjugation)"
print ""
sys.exit(1)
k = int(argv[1])
mol_names = load_mol_names(argv[5])
des = load_des(argv[2]) #key=name, value=[(atom,type),...]
actual_all,actual_no6 = load_som(argv[3], des) #key=name, value=[site1,site2,...]
predict = load_predict(argv[4],des,mol_names) #key=name, value=[(atom,rank,score),...]
print "===report considering all SOMs except those with more than one atoms==="
do_evaluate(actual_all,predict,k)
print "\n===report exclude SOM type 6 (O-conjugation) and more than one atoms==="
do_evaluate(actual_no6,predict,k)
def load_mol_names(infile):
mol_names = []
inf = open(infile,'r')
line = inf.readline()
while line != "":
mol_names.append(line.strip())
while line!="" and line.strip()!="$$$$":
line = inf.readline()
line = inf.readline()
inf.close()
return mol_names
def do_evaluate(actual,predict,k):
results = []
for i in xrange(1,k+1):
total,miss,right = evaluate(actual,predict,i)
error = total-miss-right
results.append((i,right,error))
    print "totally %d samples, of which %d have no SOM labeled"%(total,miss)
    print "k right error accuracy"
for k,right,error in results:
print "%-2d %-5d %-5d %-g"%(k,right,error,1.*right/(right+error))
print ""
def load_des(infile):
des = {}
inf = open(infile,'r')
line = inf.readline()
while line != "":
name = line.split()[0].split("\\")[-1]
name = name[:name.rfind(".")]
des[name] = []
line = inf.readline()
while line!="" and line.startswith("\t"):
temp = line.strip().split(",")[0]
atom,type,val = temp.split(":")
des[name].append((atom,type))
line = inf.readline()
inf.close()
return des
def valid(actual,atom):
for a,t in actual:
if a==atom and t=='6':
return False
return True
def load_som(infile, des):
actual_all = {}
actual_no6 = {}
inf = open(infile,'r')
count = 0
for line in inf:
line = line.strip().split("\t")
if not des.has_key(line[0]):
count += 1
continue
actual_all[line[0]] = []
actual_no6[line[0]] = []
for atom in line[1:]:
if "-" in atom:
continue
actual_all[line[0]].append(atom)
if valid(des[line[0]],atom):
actual_no6[line[0]].append(atom)
inf.close()
if count:
print "totally %d samples of %s are not in `des`"%(count, infile)
return actual_all,actual_no6
def load_predict(infile,des,mol_names):
predict = {}
inf = open(infile,'r')
line = inf.readline()
count = 0
prev_name = ""
for line in inf:
line = line.strip().split(",")
i = int(line[0])
name = mol_names[i-1]
if name!=prev_name and not des.has_key(name):
count += 1
prev_name = name
continue
if not predict.has_key(name):
predict[name] = []
#(atom,rank,score)
predict[name].append((line[1].split(".")[-1],int(line[2]),line[3]))
prev_name = name
inf.close()
for key,value in predict.iteritems():
value.sort(key=lambda x:x[1])
if count:
        print "totally %d samples of %s are not in `des_file`"%(count,infile)
return predict
def evaluate(actual,predict,k):
total = 0
miss = 0
right = 0
for name in actual.iterkeys():
total += 1
if len(actual[name]) == 0:
miss += 1
continue
found = False
for item in predict[name][:k]:
if item[0] in actual[name]:
found = True
break
if found:
right += 1
return total,miss,right
main()
|
freecoder-zrw/leetcode
|
data_struct.py
|
Python
|
apache-2.0
| 737
| 0.008141
|
#coding:utf-8
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __unicode__(self):
return u'val:%s'%(self.val,)
def __str__(self):
return self.__unicode__().encode('utf-8')
@staticmethod
def array2list(a):
head, tail = None, None
for i in a:
if not head:
head = ListNode(i)
tail = head
else:
node = ListNode(i)
tail.next = node
tail = node
return head
def trace(self):
        node = self
while node:
print node.val,'->',
node = node.next
print 'end'
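# Minimal usage sketch (added for illustration; not part of the original
# snippet): build a linked list from a Python list and print it.
if __name__ == '__main__':
    head = ListNode.array2list([1, 2, 3])
    head.trace()  # prints: 1 -> 2 -> 3 -> end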
|
GenericMappingTools/gmt-python
|
examples/gallery/lines/vector_heads_tails.py
|
Python
|
bsd-3-clause
| 3,149
| 0.002223
|
"""
Vector heads and tails
----------------------
Many modules in PyGMT allow plotting vectors with individual
heads and tails. For this purpose, several modifiers may be appended to
the corresponding vector-producing parameters for specifying the placement
of vector heads
and tails, their shapes, and the justification of the vector.
To place a vector head at the beginning of the vector path
simply append **+b** to the vector-producing option (use **+e** to place
one at the end). Optionally, append **t** for a terminal line, **c** for a
circle, **a** for arrow (default), **i** for tail, **A** for plain open
arrow, and **I** for plain open tail. Further append **l** or **r** (e.g.
``+bar``) to only draw the left or right half-sides of the selected head/tail
(default is both sides) or use **+l** or **+r** to apply simultaneously to both
sides. In this context left and right refer to the side of the vector line
when viewed from the beginning point to the end point of a line segment.
The angle of the vector head apex can be set using **+a**\ *angle*
(default is 30). The shape of the vector head can be adjusted using
**+h**\ *shape* (e.g. ``+h0.5``).
For further modifiers see the *Vector Attributes* subsection of the
corresponding module.
In the following we use the :meth:`pygmt.Figure.plot` method to plot vectors
with individual heads and tails. We must specify the modifiers (together with
the vector type, here ``v``, see also
:doc:`Vector types documentation </gallery/lines/vector_styles>`)
by passing the corresponding shortcuts to the ``style`` parameter.
"""
import pygmt
fig = pygmt.Figure()
fig.basemap(
region=[0, 10, 0, 15], projection="X15c/10c", frame='+t"Vector heads and tails"'
)
x = 1
y = 14
angle = 0 # in degrees, measured counter-clockwise from horizontal
length = 7
for vecstyle in [
# vector without head and tail (line)
"v0c",
# plain open arrow at beginning and end, angle of the vector head apex is set to 50
"v0.6c+bA+eA+a50",
# plain open tail at beginning and end
"v0.4c+bI+eI",
# terminal line at beginning and end, angle of vector head apex is set to 80
"v0.3c+bt+et+a80",
# arrow head at end
"v0.6c+e",
# circle at beginning and arrow head at end
"v0.6c+bc+ea",
# terminal line at beginning and arrow head at end
"v0.6c+bt+ea",
# arrow head at end, shape of vector head is set to 0.5
"v1c+e+h0.5",
# modified arrow heads at beginning and end
"v1c+b+e+h0.5",
# tail at beginning and arrow with modified vector head at end
"v1c+bi+ea+h0.5",
# half-sided arrow head (right side) at beginning and arrow at the end
"v1c+bar+ea+h0.8",
# half-sided arrow heads at beginning (right side) and end (left side)
"v1c+bar+eal+h0.5",
# half-sided tail at beginning and arrow at end (right side for both)
"v1c+bi+ea+r+h0.5+a45",
]:
fig.plot(
x=x, y=y, style=vecstyle, direction=([angle], [length]), pen="2p", color="red3"
)
fig.text(
x=6, y=y, text=vecstyle, font="Courier-Bold", justify="ML", offset="0.2c/0c"
)
y -= 1 # move the next vector down
fig.show()
|
UdK-VPT/Open_eQuarter
|
mole3/stat_corr/window_wall_ratio_south_AVG_by_building_age_lookup.py
|
Python
|
gpl-2.0
| 2,492
| 0.140506
|
# coding: utf8
# OeQ autogenerated lookup function for 'Window/Wall Ratio South in correlation to year of construction, based on the source data of the survey for the "German Building Typology developed by the "Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013'
import math
import numpy as np
from . import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[
1849,0.055,
1850,0.055,
1851,0.055,
1852,0.055,
1853,0.056,
1854,0.056,
1855,0.055,
1856,0.053,
1857,0.051,
1858,0.048,
1859,0.046,
1860,0.043,
1861,0.04,
1862,0.038,
1863,0.036,
1864,0.035,
1865,0.035,
1866,0.036,
1867,0.036,
1868,0.036,
1869,0.036,
1870,0.036,
1871,0.036,
1872,0.036,
1873,0.036,
1874,0.036,
1875,0.036,
1876,0.036,
1877,0.036,
1878,0.036,
1879,0.036,
1880,0.036,
1881,0.036,
1882,0.036,
1883,0.036,
1884,0.036,
1885,0.036,
1886,0.036,
1887,0.036,
1888,0.036,
1889,0.036,
1890,0.036,
1891,0.036,
1892,0.036,
1893,0.036,
1894,0.036,
1895,0.036,
1896,0.036,
1897,0.036,
1898,0.036,
1899,0.036,
1900,0.036,
1901,0.036,
1902,0.036,
1903,0.036,
1904,0.036,
1905,0.036,
1906,0.036,
1907,0.036,
1908,0.036,
1909,0.037,
1910,0.037,
1911,0.036,
1912,0.035,
1913,0.035,
1914,0.035,
1915,0.036,
1916,0.042,
1917,0.05,
1918,0.06,
1919,0.072,
1920,0.083,
1921,0.093,
1922,0.101,
1923,0.107,
1924,0.11,
1925,0.11,
1926,0.108,
1927,0.107,
1928,0.106,
1929,0.106,
1930,0.107,
1931,0.107,
1932,0.107,
1933,0.107,
1934,0.107,
1935,0.107,
1936,0.107,
1937,0.107,
1938,0.107,
1939,0.107,
1940,0.107,
1941,0.107,
1942,0.107,
1943,0.107,
1944,0.107,
1945,0.107,
1946,0.106,
1947,0.106,
1948,0.106,
1949,0.106,
1950,0.107,
1951,0.107,
1952,0.106,
1953,0.105,
1954,0.103,
1955,0.101,
1956,0.098,
1957,0.094,
1958,0.091,
1959,0.088,
1960,0.085,
1961,0.084,
1962,0.084,
1963,0.085,
1964,0.085,
1965,0.084,
1966,0.08,
1967,0.074,
1968,0.067,
1969,0.06,
1970,0.053,
1971,0.046,
1972,0.04,
1973,0.035,
1974,0.035,
1975,0.035,
1976,0.035,
1977,0.035,
1978,0.048,
1979,0.065,
1980,0.08,
1981,0.09,
1982,0.091,
1983,0.087,
1984,0.08,
1985,0.074,
1986,0.073,
1987,0.074,
1988,0.075,
1989,0.074,
1990,0.069,
1991,0.064,
1992,0.064,
1993,0.074,
1994,0.097,
1995,0.128,
1996,0.157,
1997,0.177,
1998,0.177,
1999,0.173,
2000,0.158,
2001,0.142,
2002,0.128,
2003,0.117,
2004,0.11,
2005,0.106,
2006,0.104,
2007,0.104,
2008,0.105,
2009,0.106,
2010,0.106,
2011,0.106,
2012,0.106,
2013,0.106,
2014,0.106,
2015,0.106,
2016,0.106,
2017,0.106,
2018,0.106,
2019,0.106,
2020,0.106,
2021,0.106])
return(l_lookup.lookup(xin))
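# Illustrative call (added; assumes oeqLookuptable.lookup interpolates over the
# (year of construction, ratio) pairs above):
#
#   get(1995)  # expected to be roughly 0.128 for a building erected in 1995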
|
xin3liang/platform_external_chromium_org
|
native_client_sdk/src/build_tools/build_projects.py
|
Python
|
bsd-3-clause
| 11,275
| 0.012594
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import multiprocessing
import optparse
import os
import posixpath
import sys
import urllib2
import buildbot_common
import build_version
import generate_make
import parse_dsc
from build_paths import SDK_SRC_DIR, OUT_DIR, SDK_RESOURCE_DIR
from build_paths import GSTORE
from generate_index import LandingPage
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
import getos
MAKE = 'nacl_sdk/make_3.99.90-26-gf80222c/make.exe'
LIB_DICT = {
'linux': [],
'mac': [],
'win': ['x86_32']
}
VALID_TOOLCHAINS = [
'bionic',
'newlib',
'glibc',
'pnacl',
'win',
'linux',
'mac',
]
# Global verbosity setting.
# If set to True (normally via a command line arg) then build_projects will
# add V=1 to all calls to 'make'
verbose = False
def Trace(msg):
if verbose:
sys.stderr.write(str(msg) + '\n')
def CopyFilesFromTo(filelist, srcdir, dstdir):
for filename in filelist:
srcpath = os.path.join(srcdir, filename)
dstpath = os.path.join(dstdir, filename)
buildbot_common.CopyFile(srcpath, dstpath)
def UpdateHelpers(pepperdir, clobber=False):
tools_dir = os.path.join(pepperdir, 'tools')
if not os.path.exists(tools_dir):
buildbot_common.ErrorExit('SDK tools dir is missing: %s' % tools_dir)
exampledir = os.path.join(pepperdir, 'examples')
if clobber:
buildbot_common.RemoveDir(exampledir)
buildbot_common.MakeDir(exampledir)
# Copy files for individual build and landing page
files = ['favicon.ico', 'httpd.cmd', 'index.css', 'index.js',
'button_close.png', 'button_close_hover.png']
CopyFilesFromTo(files, SDK_RESOURCE_DIR, exampledir)
# Copy tools scripts and make includes
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.py'),
tools_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', '*.mk'),
tools_dir)
# Copy tools/lib scripts
tools_lib_dir = os.path.join(pepperdir, 'tools', 'lib')
buildbot_common.MakeDir(tools_lib_dir)
buildbot_common.CopyDir(os.path.join(SDK_SRC_DIR, 'tools', 'lib', '*.py'),
tools_lib_dir)
# On Windows add a prebuilt make
if getos.GetPlatform() == 'win':
buildbot_common.BuildStep('Add MAKE')
make_url = posixpath.join(GSTORE, MAKE)
make_exe = os.path.join(tools_dir, 'make.exe')
with open(make_exe, 'wb') as f:
f.write(urllib2.urlopen(make_url).read())
def ValidateToolchains(toolchains):
invalid_toolchains = set(toolchains) - set(VALID_TOOLCHAINS)
if invalid_toolchains:
buildbot_common.ErrorExit('Invalid toolchain(s): %s' % (
', '.join(invalid_toolchains)))
def GetDeps(projects):
out = {}
# Build list of all project names
localtargets = [proj['NAME'] for proj in projects]
# For each project
for proj in projects:
deplist = []
# generate a list of dependencies
for targ in proj.get('TARGETS', []):
deplist.extend(targ.get('DEPS', []) + targ.get('LIBS', []))
# and add dependencies to targets built in this subtree
localdeps = [dep for dep in deplist if dep in localtargets]
if localdeps:
out[proj['NAME']] = localdeps
return out
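# Illustrative input/output for GetDeps (added; the project names below are
# hypothetical): a project appears in the result only if at least one of its
# DEPS/LIBS is itself built in this subtree.
#
#   projects = [{'NAME': 'mylib', 'TARGETS': []},
#               {'NAME': 'app', 'TARGETS': [{'LIBS': ['mylib', 'ppapi']}]}]
#   GetDeps(projects) == {'app': ['mylib']}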
def UpdateProjects(pepperdir, project_tree, toolchains,
clobber=False, configs=None, first_toolchain=False):
if configs is None:
configs = ['Debug', 'Release']
if not os.path.exists(os.path.join(pepperdir, 'tools')):
buildbot_common.ErrorExit('Examples depend on missing tools.')
if not os.path.exists(os.path.join(pepperdir, 'toolchain')):
buildbot_common.ErrorExit('Examples depend on missing toolchains.')
ValidateToolchains(toolchains)
# Create the library output directories
libdir = os.path.join(pepperdir, 'lib')
platform = getos.GetPlatform()
for config in configs:
for arch in LIB_DICT[platform]:
dirpath = os.path.join(libdir, '%s_%s_host' % (platform, arch), config)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
landing_page = None
for branch, projects in project_tree.iteritems():
dirpath = os.path.join(pepperdir, branch)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
targets = [desc['NAME'] for desc in projects]
deps = GetDeps(projects)
# Generate master make for this branch of projects
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch),
targets, deps)
if branch.startswith('examples') and not landing_page:
landing_page = LandingPage()
# Generate individual projects
for desc in projects:
srcroot = os.path.dirname(desc['FILEPATH'])
generate_make.ProcessProject(pepperdir, srcroot, pepperdir, desc,
toolchains, configs=configs,
first_toolchain=first_toolchain)
if branch.startswith('examples'):
landing_page.AddDesc(desc)
if landing_page:
# Generate the landing page text file.
index_html = os.path.join(pepperdir, 'examples', 'index.html')
index_template = os.path.join(SDK_RESOURCE_DIR, 'index.html.template')
with open(index_html, 'w') as fh:
out = landing_page.GeneratePage(index_template)
fh.write(out)
# Generate top Make for examples
targets = ['api', 'demo', 'getting_started', 'tutorial']
targets = [x for x in targets if 'examples/'+x in project_tree]
branch_name = 'examples'
generate_make.GenerateMasterMakefile(pepperdir,
os.path.join(pepperdir, branch_name),
targets, {})
def BuildProjectsBranch(pepperdir, branch, deps, clean, config, args=None):
make_dir = os.path.join(pepperdir, branch)
print "\nMake: " + make_dir
if getos.GetPlatform() == 'win':
# We need to modify the environment to build host on Windows.
make = os.path.join(make_dir, 'make.bat')
else:
make = 'make'
env = None
if os.environ.get('USE_GOMA') == '1':
env = dict(os.environ)
env['NACL_COMPILER_PREFIX'] = 'gomacc'
# Add -m32 to the CFLAGS when building using i686-nacl-gcc
# otherwise goma won't recognise it as different to the x86_64
# build.
env['X86_32_CFLAGS'] = '-m32'
env['X86_32_CXXFLAGS'] = '-m32'
jobs = '50'
else:
jobs = str(multiprocessing.cpu_count())
make_cmd = [make, '-j', jobs]
make_cmd.append('CONFIG='+config)
# We always ENABLE_BIONIC in case we need it. If neither --bionic nor
  # -t bionic have been provided on the command line, then VALID_TOOLCHAINS
# will not contain a bionic target.
make_cmd.append('ENABLE_BIONIC=1')
if not deps:
make_cmd.append('IGNORE_DEPS=1')
if verbose:
    make_cmd.append('V=1')
if args:
make_cmd += args
else:
make_cmd.append('TOOLCHAIN=all')
buildbot_common.Run(make_cmd, cwd=make_dir, env=env)
if clean:
# Clean to remove temporary files but keep the built
buildbot_common.Run(make_cmd + ['clean'], cwd=make_dir, env=env)
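# For reference (added): on an 8-core Linux host without goma, the command
# assembled above resembles
#   ['make', '-j', '8', 'CONFIG=Debug', 'ENABLE_BIONIC=1', 'TOOLCHAIN=all']
# run from <pepperdir>/<branch>; 'IGNORE_DEPS=1' and 'V=1' are appended only
# when dependency tracking is disabled or verbose mode is on.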
def BuildProjects(pepperdir, project_tree, deps=True,
clean=False, config='Debug'):
# Make sure we build libraries (which live in 'src') before
# any of the examples.
  build_first = [p for p in project_tree if p == 'src']
  build_second = [p for p in project_tree if p != 'src']
for branch in build_first + build_second:
BuildProjectsBranch(pepperdir, branch, deps, clean, config)
def main(argv):
parser = optparse.OptionParser()
parser.add_option('-c', '--clobber',
help='Clobber project directories before copying new files',
action='store_true', default=False)
parser.add_option('-b', '--build',
help='Build the projects. Otherwise the projects are only copied.',
action='store_true')
parser.add_option('--config',
help='Choose configuration to build (Debug or Release). Builds both '
'by default')
parser.add_option('--bionic',
help='Enable bionic projects', action='store
|
jthurst3/MemeCaptcha
|
MemeScrapers/MemeScrapers/items.py
|
Python
|
mit
| 291
| 0
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MemescrapersItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
mallconnectionorg/openerp
|
rrhh/l10n_cl_banks/__openerp__.py
|
Python
|
agpl-3.0
| 1,624
| 0.005552
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Instituciones Financieras Chile',
'version': '1.0',
'category': 'Localization/Chile',
"description": """
Fichas de Bancos y Cooperativas, establecidos por SBIF
- Bancos Establecidos en Chile
- Cooperativas de Ahorro y Crédito
- Bancos Estatales
- Sucursales de Bancos Extranjeros
""",
'author': 'Iván Masías - ivan.masias.ortiz@gmail.com, Rev. Pedro Arroyo<parroyo@mallconnection.com>',
'website': '',
'depends': [ 'base'],
'data': [
'data/res.bank.csv',
'view/res_bank.xml'
],
'installable': True,
'active': False,
}
|
PersianWikipedia/fawikibot
|
laupdate.py
|
Python
|
mit
| 3,731
| 0.001971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Distributed under the terms of MIT License (MIT)
import pywikibot
import time
from pywikibot.data.api import Request
import re
site = pywikibot.Site('fa', fam='wikipedia')
print "Fetching admins list"
data = Request(site=site, action="query", list="allusers", augroup="sysop", aulimit=500).submit()
adminsac = []
adminbots = ["Dexbot"]
adminsdiac = {}
for admin in data["query"]["allusers"]:
admin = admin["name"]
if admin in adminbots:
continue
acaction = []
dcaction = []
actions = "block, protect, rights, delete, upload, import, renameuser".split(
", ")
for adminaction in actions:
data1 = Request(site=site, action="query", list="logevents",
leuser=admin, letype=adminaction).submit()
for action in data1["query"]["logevents"]:
times = action["timestamp"].split("T")[0].split("-")
today = time.strftime('%Y/%m/%d').split("/")
diff = ((int(today[0]) - int(times[0])) * 365) + (
(int(today[1]) - int(times[1])) * 30) + (int(today[2]) - int(times[2]))
if diff < 90:
acaction.append(
action["timestamp"].split("T")[0].replace("-", ""))
else:
dcaction.append(
action["timestamp"].split("T")[0].replace("-", ""))
thmag = {"y": int(time.strftime('%Y')), "m": int(
time.strftime('%m')), "d": int(time.strftime('%d'))}
if (int(thmag["m"]) - 3) <= 0:
thmag["y"] = thmag["y"] - 1
thmag["m"] = thmag["m"] + 9
else:
thmag["m"] = thmag["m"] - 3
if thmag["m"] < 10:
thmag["m"] = "0" + str(thmag["m"])
if thmag["d"] < 10:
thmag["d"] = "0" + str(thmag["d"])
thmag1 = [str(thmag["y"]), str(thmag["m"]), str(thmag["d"])]
data2 = Request(site=site, action="query", list="usercontribs", ucuser=admin,
ucnamespace=8, ucend="%sT00:00:00Z" % "-".join(thmag1)).submit()
for actionmw in data2["query"]["usercontribs"]:
acaction.append(actionmw["timestamp"].split("T")[0].replace("-", ""))
if len(acaction) >= 10:
        if re.search(ur"[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآ]", admin[0]):
adminsac.append(u"!!!!!!!!!!!!!!!!!!!!!!!!!!!" + admin)
else:
adminsac.append(admin)
else:
acaction.sort()
dcaction.sort()
if re.search(ur"[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآ]", admin[0]):
admin = u"!!!!!!!!!!!!!!!!!!!!!!!!!!!" + admin
try:
adminsdiac[admin] = acaction[-1]
except:
            adminsdiac[admin] = dcaction[-1]
pywikibot.output(admin)
adminsac.sort()
activetext = u"\n{{ویکیپدیا:فهرست مدیران/سطرف|" + \
u"}}\n{{ویکیپدیا:فهرست مدیران/سطرف|".join(adminsac) + u"}}"
deactivetext = u"\n"
activetext = activetext.replace(u"!!!!!!!!!!!!!!!!!!!!!!!!!!!", u"")
ak = adminsdiac.keys()
ak.sort()
for admin in ak:
deactivetext = deactivetext + \
u"{{ویکیپدیا:فهرست مدیران/سطرغ|" + admin + \
u"|" + adminsdiac[admin] + u"}}\n"
deactivetext = deactivetext.replace(u"!!!!!!!!!!!!!!!!!!!!!!!!!!!", u"")
page = pywikibot.Page(site, u"ویکیپدیا:فهرست مدیران")
text = page.get()
pywikibot.output(deactivetext)
new_text = text.replace(text.split(u"<!-- Active -->")[1], activetext + u"\n")
new_text = new_text.replace(u"<!-- Deactive -->" + text.split(
u"<!-- Deactive -->")[1], u"<!-- Deactive -->" + deactivetext + u"\n")
page.put(new_text, u"ربات: بروزرسانی فهرست")
|
silenci/neutron
|
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/test_qos_driver.py
|
Python
|
apache-2.0
| 3,786
| 0.001321
|
# Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
def setUp(self):
super(QosSRIOVAgentDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosSRIOVAgentDriver()
self.qos_driver.initialize()
self.qos_driver.eswitch_mgr = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock()
self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate
self.rule = self._create_bw_limit_rule_obj()
self.qos_policy = self._create_qos_policy_obj([self.rule])
self.port = self._create_fake_port()
def _create_bw_limit_rule_obj(self):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'tenant_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
return policy_obj
def _create_fake_port(self):
return {'port_id': uuidutils.generate_uuid(),
'profile': {'pci_slot': self.PCI_SLOT},
'device': self.ASSIGNED_MAC}
def test_create_rule(self):
self.qos_driver.create(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_update_rule(self):
self.qos_driver.update(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_delete_rules(self):
self.qos_driver.delete(self.port, self.qos_policy)
self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT)
def test__set_vf_max_rate_captures_sriov_failure(self):
self.max_rate_mock.side_effect = exceptions.SriovNicError()
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
def test__set_vf_max_rate_unknown_device(self):
with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
return_value=False):
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
self.assertFalse(self.max_rate_mock.called)
|
OCA/l10n-switzerland
|
l10n_ch_account_tags/__manifest__.py
|
Python
|
agpl-3.0
| 561
| 0
|
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
"name": "Switz
|
erland Account Tags",
"category": "Localisation",
"summary": "",
"version": "14.0.1.0.0",
"author": "Camptocamp SA, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/l10n-switzerland",
"license": "AGPL-3",
"depends": ["l10n_ch"],
"data": [
"data/new/account.account.tag.csv",
"data/new/account.account.template.csv",
"data/update/account.account.template.csv",
],
}
|
mjwtom/swift
|
test/dedupe/bin/download-debug.py
|
Python
|
apache-2.0
| 283
| 0.003534
|
#!/home/mjwtom/install/python/bin/python
# -*- coding: utf-8 -*-
'''
Created on Jan 12, 2015
@author: mjwtom
'''
import os
if __name__ == '__main__':
    os.system('/home/mjwtom/bin/swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing download mjw home/mjwtom/file')
|
Zerknechterer/pyload
|
module/plugins/hoster/ZeveraCom.py
|
Python
|
gpl-3.0
| 883
| 0.011325
|
# -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
class ZeveraCom(MultiHoster):
__name__ = "ZeveraCom"
__type__ = "hoster"
__version__ = "0.31"
__pattern__ = r'https?://(?:www\.)zevera\.com/(getFiles\.ashx|Members/download\.ashx)\?.*ourl=.+'
__config__ = [("use_premium", "bool", "Use premium account if available",
|
True)]
__description__ = """Zevera.com multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz"),
("Walter Purcaro", "vuolter@gmail.com")]
FILE_ERRORS = [("Error", r'action="ErrorDownload.aspx')]
def handlePremium(self, pyfile):
self.link = "https://%s/getFiles.ashx?ourl=%s" % (self.account.HOSTER_DOMAIN, pyfile.url)
getInfo = create_getInfo(ZeveraCom)
|
yephper/django
|
tests/timezones/tests.py
|
Python
|
bsd-3-clause
| 59,648
| 0.001794
|
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from contextlib import contextmanager
from unittest import SkipTest, skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, connections
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.urls import reverse
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
requires_pytz = skipIf(pytz is None, "this test requires pytz")
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9,
|
jstitch/gift_circle
|
GiftCircle/GiftCircle/wsgi.py
|
Python
|
gpl-3.0
| 397
| 0
|
"""
WSGI config for GiftCircle project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GiftCircle.settings')
application = get_wsgi_application()
|
joshchea/python-tdm
|
scripts/CalcLogitChoice.py
|
Python
|
mit
| 12,741
| 0.015462
|
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------#
# Name: CalcLogitChoice
# Purpose: Utilities for various calculations of different types of choice models.
# a) CalcMultinomialChoice : Calculates a multinomial choice model probability given a dictionary of mode utilities
# b) CalcPivotPoint : Calculates pivot point choice probability given base utilities, current utilities and base proabilities
# c) CalcNestedChoice : Calculates n-level nested mode choice probabilities given dictionary with tree definition, matrix references and number of zones
# d) CalcNestedChoiceFlat : Calculate nested choice on flat array so it can be used for stuff like microsim ABM etc... e) can in general be easily modified for this
# **All input vectors are expected to be numpy arrays
#
# Author: Chetan Joshi, Portland OR
# Dependencies:numpy [www.numpy.org], math, time
# Created: 5/14/2015
#
# Copyright: (c) Chetan Joshi 2015
# Licence: Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------#
import numpy
import time
import math
#from memory_profiler import profile
def CalcMultinomialChoice(Utils, getLogSumAccess = 0):
'''Utils = Dictionary of utility matrices for each mode
ex. Utils = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
Probs = {}
eU = {}
eU_total = numpy.zeros(Utils[Utils.keys()[0]].shape)
for key in Utils.keys():
eU[key] = numpy.exp(Utils[key])
eU_total+=eU[key]
if getLogSumAccess <> 0:
lnSumAccess = numpy.log(eU_total)
eU_total[eU_total == 0] = 0.0001
for key in eU.keys():
Probs[key] = eU[key]/eU_total
del eU, eU_total, Utils
if getLogSumAccess == 0:
return Probs
else:
return Probs, lnSumAccess
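# Illustrative call (added; shapes and mode names are arbitrary): with equal
# utilities every mode receives the same share in every cell.
#
#   Utils = {'auto': numpy.zeros((2, 2)), 'transit': numpy.zeros((2, 2))}
#   Probs = CalcMultinomialChoice(Utils)  # every entry of both matrices is 0.5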
def CalcPivotPoint(Utils, Po):
'''
    Utils = Updated delta utility matrices in a dictionary i.e delta of Uk (k = mode)
ex. Utils = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
Po = Base probabilities in a dictionary
ex. Po = {'auto':mat1, 'transit':mat2, 'bike':mat3, 'walk':mat4}
'''
Probs = {}
PeU = {}
    PeU_total = numpy.zeros(Utils[Utils.keys()[0]].shape)
for key in Utils.keys():
PeU[key] = Po[key]*numpy.exp(Utils[key])
PeU_total+=PeU[key]
PeU_total[PeU_total == 0] = 0.0001
for key in PeU.keys():
Probs[key] = PeU[key]/PeU_total
del PeU, PeU_total, Utils
return Probs
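# Illustrative property of the pivot-point formula (added): if every mode's
# utility change is zero, the updated shares equal the base shares.
#
#   Po    = {'auto': numpy.full((2, 2), 0.6), 'transit': numpy.full((2, 2), 0.4)}
#   Utils = {'auto': numpy.zeros((2, 2)), 'transit': numpy.zeros((2, 2))}
#   CalcPivotPoint(Utils, Po)  # returns shares equal to Po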
#@profile
def CalcNestedChoice(TreeDefn, MatRefs, numZn, getLogSumAccess = 0):
'''
#TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR', 'AC']],
# (1,'AU'):[0.992,['CD', 'CP']],
# (1,'TR'):[0.992,['TB', 'TP']],
# (1,'AC'):[0.992,['BK', 'WK']]}
#
#Key-> (Level ID, Level Code): Values-> (LogSum Parameter enters as: 1/lambda, SubLevel IDs)
# ROOT should always be ID = 0 and Code = 'ROOT'
# ROOT
# / | \
# / | \
# / | \
# AU TR AC(logsum parameter)
# /\ /\ /\
# CD CP TB TP BK WK
#
#MatRefs = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0,
# 'CD':Ucd), 'CP':Ucp),
# 'TB':Utb), 'TP':Utp),
# 'BK':Ubk), 'WK':Uwk)} Stores utilities in dict of matrices, base level utilities are pre-specified!!
#
#numZn = number of zones
#
#getLogSumAccess (optional, accessibility log sum) 0=no, <>0=yes
'''
#ProbMats = {'ROOT': 1.0, 'AU':0, 'TR':0, 'AC':0, 'CD':0, 'CP':0, 'TB':0, 'TP':0, 'BK':0, 'WK':0} #Stores probabilities at each level
#TripMat = GetMatrixRaw(Visum, tripmatno) #--> Input trip distribution matrix
#numZn = Visum.Net.Zones.Count
ProbMats = dict(zip(MatRefs.keys(), numpy.zeros(len(MatRefs.keys()))))
ProbMats['ROOT'] = 1.0
#Utility calculator going up...
#print 'Getting logsums and utilities...'
for key in sorted(TreeDefn.keys(), reverse= True):
#print key, TreeDefn[key]
sumExp = numpy.zeros((numZn,numZn))
sublevelmat_codes = TreeDefn[key][1] #produces --> ex. ['WB', 'WX', 'DX']
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
MatRefs[code] = MatRefs[code]/TreeDefn[key][0] #---> scale the utility
sumExp+=numpy.exp(MatRefs[code])
lnSum = sumExp.copy() #Maybe there is a better way of doing the next 4 steps in 1 shot
lnSum[sumExp == 0] = 0.000000001
lnSum = numpy.log(lnSum)
lnSum[sumExp == 0] = -999
MatRefs[key[1]] = TreeDefn[key][0]*lnSum #---> Get ln sum of sublevel
#Probability going down...
#print 'Getting probabilities...'
for key in sorted(TreeDefn.keys()):
#print key, TreeDefn[key]
eU_total = numpy.zeros((numZn,numZn))
sublevelmat_codes = TreeDefn[key][1] #1st set--> ROOT : AU, TR
for code in sublevelmat_codes:
#print ([code, TreeDefn[key][0]])
eU_total+=numpy.exp(MatRefs[code])
eU_total[eU_total == 0] = 0.0001 #Avoid divide by 0 error
## for code in sublevelmat_codes:
## ProbMats[code] = ProbMats[key[1]]*numpy.exp(MatRefs[code])/eU_total
nSublevels = len(sublevelmat_codes)
cumProb = 0
for i in xrange(nSublevels - 1):
code = sublevelmat_codes[i]
temp = numpy.exp(MatRefs[code])/eU_total
ProbMats[code] = ProbMats[key[1]]*temp
cumProb+=temp
code = sublevelmat_codes[i+1]
ProbMats[code] = ProbMats[key[1]]*(1.0-cumProb)
if getLogSumAccess == 0:
return ProbMats
else:
return ProbMats, MatRefs['ROOT']
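# Illustrative call (added): with TreeDefn/MatRefs structured as sketched in
# the docstring above and numZn zones, the per-mode probability matrices are
# obtained as
#
#   ProbMats = CalcNestedChoice(TreeDefn, MatRefs, numZn)
#   ProbMats['CD']  # numZn x numZn probability matrix for leaf mode 'CD'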
def CalcNestedChoiceFlat(TreeDefn, MatRefs, vecLen, getLogSumAccess = 0):
'''
#TreeDefn = {(0,'ROOT'):[1.0,['AU', 'TR', 'AC']],
# (1,'AU'):[0.992,['CD', 'CP']],
# (1,'TR'):[0.992,['TB', 'TP']],
# (1,'AC'):[0.992,['BK', 'WK']]}
#
#Key-> (Level ID, Level Code): Values-> (LogSum Parameter enters as: 1/lambda, SubLevel IDs)
# ROOT should always be ID = 0 and Code = 'ROOT'
|
Micronaet/micronaet-migration
|
accounting_statistic_base/etl/__MOVED_SCHEDULED__/posta.py
|
Python
|
agpl-3.0
| 2,279
| 0.011847
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# I find this on site: http://snippets.dzone.com/posts/show/2038
# sorry but, as the publisher, I don't know the origin and the owner of this code
# Let me tell him thank you, very useful code ;)
#
# The procedure send an E-mail to recipient list with recipient attachment
# The server provider use standard 25 port and no autentication
#
# Ex.:send_mail("riolini@micronaet.it",["info@micronaet.it",],"Prova","Messaggio di prova",["/home/administrator/example.log",],"192.168.100.254")
import smtplib, os
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
def send_mail(send_from, send_to, subject, text, files=[], server="localhost", username = '', password = '', TLS = False):
    ''' Send mail through an SMTP server (with or without authentication)
'''
    # -----------------------
    # - Message preparation -
    # -----------------------
assert type(send_to)==list
assert type(files)==list
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach( MIMEText(text) )
for f in files:
part = MIMEBase('application', "octet-stream")
part.set_payload(open(f,"rb").read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
msg.attach(part)
    # --------------
    # - Send mail  -
    # --------------
    if not username: # send without authentication:
smtp = smtplib.SMTP(server)
#smtp.login(user, password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
    elif TLS: # send with TLS authentication
        smtp = smtplib.SMTP(server)
        smtp.starttls()
        smtp.login(username, password)
        smtp.sendmail(send_from, send_to, msg.as_string())
        smtp.quit()
else:
        pass # not needed for now
def raise_error(text, file_name):
print text
file_name.write(text + "\n")
return
|
Puyb/inscriptions_roller
|
inscriptions/migrations/0051_auto_20190131_2038.py
|
Python
|
gpl-3.0
| 978
| 0.002047
|
# Generated by Django 2.0 on 2019-01-31 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('inscriptions', '0050_merge_20190131_0016'),
]
operations = [
migrations.AlterField(
model_name='equipier',
name='cerfa_valide',
field=models.BooleanField(verbose_name='Cerfa QS-SPORT'),
),
migrations.AlterField(
model_name='equipier',
name='piece_jointe',
            field=models.FileField(blank=True, upload_to='certificats', verbose_name='Certificat ou licence'),
),
migrations.AlterField(
model_name='templatemail',
name='destinataire',
field=models.CharField(choices=[('Equipe', "Gerant d'équipe"), ('Equipier', 'Equipier'), ('Organisateur', 'Organisateur'), ('Paiement', 'Paiement'), ('Tous', 'Tous')], max_length=20, verbose_name='Destinataire'),
),
]
|
sani-coop/tinjaca
|
addons/solicitudes/models/requisitos_garantia.py
|
Python
|
gpl-2.0
| 648
| 0.006182
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class RequisitosGarantia(models.Model):
_name = 'solicitudes.requisitos_garantia'
solicitudes_id = fields.Many2one('solicitudes.solicitudes', string="Número de expediente")
documentos_garantia_id = fields.Many2one('politicas.documentos_garantia', string="Tipo de Documento")
documento = fields.Binary(string='Documento')
    observaciones = fields.Char(string='Observaciones')
valido = fields.Boolean(string='Valido')
    solicitudes_tipos_garantia_id = fields.Many2one(string='Garantia', related='solicitudes_id.propuestas_tipos_garantia_id', readonly=True)
|
SUNET/eduid-common
|
src/eduid_common/api/decorators.py
|
Python
|
bsd-3-clause
| 7,549
| 0.002119
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import warnings
from functools import wraps
from flask import abort, jsonify, request
from marshmallow.exceptions import ValidationError
from six import string_types
from werkzeug.wrappers import Response as WerkzeugResponse
from eduid_common.api.messages import FluxData, error_response
from eduid_common.api.schemas.models import FluxFailResponse, FluxResponseStatus, FluxSuccessResponse
from eduid_common.api.utils import get_user
from eduid_common.session import session
__author__ = 'lundberg'
def require_eppn(f):
@wraps(f)
def require_eppn_decorator(*args, **kwargs):
eppn = session.get('user_eppn', None)
# If the user is logged in and has a session
# pass on the request to the decorated view
# together with the eppn of the logged in user.
if eppn:
kwargs['eppn'] = eppn
return f(*args, **kwargs)
abort(401)
return require_eppn_decorator
def require_user(f):
@wraps(f)
def require_user_decorator(*args, **kwargs):
user = get_user()
kwargs['user'] = user
return f(*args, **kwargs)
return require_user_decorator
def can_verify_identity(f):
@wraps(f)
def verify_identity_decorator(*args, **kwargs):
user = get_user()
# For now a user can just have one verified NIN
if user.nins.primary is not None:
# TODO: Make this a CommonMsg I guess
return error_response(message='User is already verified')
# A user can not verify a nin if another previously was verified
locked_nin = user.locked_identity.find('nin')
if locked_nin and locked_nin.number != kwargs['nin']:
# TODO: Make this a CommonMsg I guess
return error_response(message='Another nin is already registered for this user')
return f(*args, **kwargs)
return verify_identity_decorator
class MarshalWith(object):
"""
Decorator to format the data returned from a Flask view and ensure it conforms to a marshmallow schema.
A common usage is to use this to format the response as a Flux Standard Action
(https://github.com/redux-utilities/flux-standard-action) by using a schema that has FluxStandardAction
as superclass, or as a mixin.
See the documentation of the FluxResponse class, or the link above, for more information about the
on-the-wire format of these Flux Standard Actions.
"""
def __init__(self, schema):
self.schema = schema
def __call__(self, f):
@wraps(f)
def marshal_decorator(*args, **kwargs):
# Call the Flask view, which is expected to return a FluxData instance,
# or in special cases an WerkzeugResponse (e.g. when a redirect is performed).
ret = f(*args, **kwargs)
if isinstance(ret, WerkzeugResponse):
# No need to Marshal again, someone else already did that
return ret
if isinstance(ret, dict):
# TODO: Backwards compatibility mode - work on removing the need for this
ret = FluxData(FluxResponseStatus.OK, payload=ret)
if not isinstance(ret, FluxData):
raise TypeError('Data returned from Flask view was not a FluxData (or WerkzeugResponse) instance')
if ret.status != FluxResponseStatus.OK:
_flux_response = FluxFailResponse(request, payload=ret.payload)
else:
_flux_response = FluxSuccessResponse(request, payload=ret.payload)
return jsonify(self.schema().dump(_flux_response.to_dict()))
return marshal_decorator
class UnmarshalWith(object):
def __init__(self, schema):
self.schema = schema
def __call__(self, f):
@wraps(f)
def unmarshal_decorator(*args, **kwargs):
try:
json_data = request.get_json()
if json_data is None:
json_data = {}
unmarshal_result = self.schema().load(json_data)
kwargs.update(unmarshal_result)
return f(*args, **kwargs)
except ValidationError as e:
response_data = FluxFailResponse(
request, payload={'error': e.normalized_messages(), 'csrf_token': session.get_csrf_token()}
)
return jsonify(response_data.to_dict())
return unmarshal_decorator
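# Illustrative view using the decorators above (added as a sketch; the schema
# and view names are hypothetical, and the stacking order mirrors typical use):
#
#   @UnmarshalWith(SomeRequestSchema)
#   @MarshalWith(SomeResponseSchema)
#   @require_user
#   def some_view(user, message):
#       return FluxData(FluxResponseStatus.OK, payload={'echo': message})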
# https://stackoverflow.com/questions/2536307/how-do-i-deprecate-python-functions/40301488#40301488
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
if isinstance(reason, string_types):
# The @deprecated is used with a 'reason'.
#
# .. code-block:: python
#
# @deprecated("please, use another function")
# def old_function(x, y):
# pass
def decorator(func1):
if inspect.isclass(func1):
fmt1 = "Call to deprecated class {name} ({reason})."
else:
fmt1 = "Call to deprecated function {name} ({reason})."
@wraps(func1)
def new_func1(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
fmt1.format(name=func1.__name__, reason=reason), category=DeprecationWarning, stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
return func1(*args, **kwargs)
return new_func1
return decorator
elif inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
#
# .. code-block:: python
#
# @deprecated
# def old_function(x, y):
# pass
func2 = reason
if inspect.isclass(func2):
fmt2 = "Call to deprecated class {name}."
else:
fmt2 = "Call to deprecated function {name}."
@wraps(func2)
def new_func2(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(fmt2.format(name=func2.__name__), category=DeprecationWarning, stacklevel=2)
            warnings.simplefilter('default', DeprecationWarning)
return func2(*args, **kwargs)
return new_func2
else:
raise TypeError(repr(type(reason)))
@deprecated('Use eduid_common.api.decorators.deprecated instead')
class Deprecated(object):
"""
Mark deprecated functions with this decorator.
Attention! Use it as the closest one to the function you decorate.
:param message: The deprecation message
:type message: str | unicode
"""
def __init__(self, message=None):
self.message = message
def __call__(self, func):
if self.message is None:
self.message = 'Deprecated function {!r} called'.format(func.__name__)
@wraps(func)
def new_func(*args, **kwargs):
warnings.warn(self.message, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
        # work around a bug in functools.wraps that's fixed in python 3.2
if getattr(new_func, '__wrapped__', None) is None:
new_func.__wrapped__ = func
return new_func
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/TextWindowType.py
|
Python
|
gpl-2.0
| 767
| 0.007823
|
# encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class TextWindowType(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
}
__gtype__ = None # (!) real value is ''
|
ctk3b/mdtraj
|
mdtraj/formats/amberrst.py
|
Python
|
lgpl-2.1
| 33,272
| 0.002495
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors:
#
# This code for reading Amber restart and inpcrd files was taken from ParmEd,
# which is released under the GNU Lesser General Public License
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
This module provides the ability to read Amber inpcrd/restart files as well as
Amber NetCDF restart files. This code was taken from ParmEd and simplified by
removing the functionality that is not needed.
"""
from __future__ import print_function, division
from distutils.version import StrictVersion
from math import ceil
import os
import warnings
import numpy as np
from mdtraj import version
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices, six
__all__ = ['AmberRestartFile', 'load_restrt', 'AmberNetCDFRestartFile',
'load_ncrestrt']
range = six.moves.range
@FormatRegistry.register_loader('.rst7')
@FormatRegistry.register_loader('.restrt')
@FormatRegistry.register_loader('.inpcrd')
def load_restrt(filename, top=None, atom_indices=None):
"""Load an AMBER ASCII restart/inpcrd file. Since this file doesn't contain
information to specify the topology, you need to supply a topology
Parameters
----------
filename : str
name of the AMBER restart file
top : {str, Trajectory, Topology}
Pass in either the path to a file containing topology information (e.g.,
a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
supply the necessary topology information that is not present in these
files
atom_indices : array_like, optional
If not None, then read only a subset of the atoms coordinates from the
file.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object
See Also
--------
mdtraj.AmberRestartFile : Low level interface to AMBER restart files
"""
from mdtraj.core.trajectory import _parse_topology
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
with AmberRestartFile(filename) as f:
return f.read_as_traj(topology, atom_indices=atom_indices)
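# Illustrative use of load_restrt (added; the file names are placeholders):
#
#   traj = load_restrt('system.rst7', top='system.prmtop')
#   traj.xyz.shape  # (1, n_atoms, 3) -- a restart file holds a single frame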
@FormatRegistry.register_fileobject('.rst7')
@FormatRegistry.register_fileobject('.restrt')
@FormatRegistry.register_fileobject('.inpcrd')
class AmberRestartFile(object):
"""Interface for reading and writing AMBER ASCII restart files. This is a
file-like object, that supports both reading and writing depending on the
`mode` flag. It implements the context manager protocol, so you can also
use it with the python 'with' statement.
Parameters
----------
filename : str
The name of the file to open
mode : {'r', 'w'}, default='r'
The mode in which to open the file. Valid options are 'r' or 'w' for
'read' or 'write'
force_overwrite : bool, default=False
In write mode, if a file named `filename` already exists, clobber it and
overwrite it
See Also
--------
md.AmberNetCDFRestartFile : Low level interface to AMBER NetCDF-format
restart files
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=True):
self._closed = True
self._mode = mode
self._filename = filename
if mode not in ('r', 'w'):
raise ValueError("mode must be one of ['r', 'w']")
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
if mode == 'w':
self._needs_initialization = True
self._handle = open(filename, mode)
self._closed = False
elif mode == 'r':
with open(filename, mode) as f:
f.readline()
words = f.readline().split()
try:
self._n_atoms = int(words[0])
except (IndexError, ValueError):
raise TypeError('"%s" is not a recognized Amber restart' %
filename)
self._needs_initialization = False
else:
raise RuntimeError()
@property
def n_atoms(self):
self._validate_open()
if self._needs_initialization:
raise IOError('The file is uninitialized')
return self._n_atoms
@property
def n_frames(self):
return 1 # always 1 frame
def _validate_open(self):
if self._closed:
raise IOError('The file is closed.')
def _parse(self, lines):
""" Parses the file """
self._time = None
try:
words = lines[1].split()
self._n_atoms = natom = int(words[0])
except (IndexError, ValueError):
raise TypeError('not a recognized Amber restart')
time = None
if len(words) >= 2:
time = float(words[1])
lines_per_frame = int(ceil(natom / 2))
if len(lines) == lines_per_frame + 2:
hasbox = hasvels = False
elif natom in (1, 2) and len(lines) == 4:
# This is the _only_ case where line counting does not work -- there
# is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are
# the title, natom/time, and coordinates. The 4th are almost always
# velocities since it's hard to have a periodic system this small.
# However, velocities (which are scaled down by 20.445) have a ~0%
# chance of being 60+, so we can pretty easily tell if the last line
# has box dimensions and angles or velocities. I cannot envision a
# plausible scenario where the detection here will ever fail
line = lines[3]
if natom == 1:
tmp = [line[i:i+12] for i in range(0, 72, 12) if
line[i:i+12].strip()]
if len(tmp) == 3:
hasvels = True
hasbox = False
elif len(tmp) == 6:
hasbox = True
hasvels = False
else:
raise TypeError('not a recognized Amber restart')
else:
# Ambiguous case
tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]
if any(tmp):
hasbox = True
hasvels = False
else:
hasvels = True
hasbox = False
elif len(lines) == lines_per_frame + 3:
hasbox = True
hasvels = False
elif len(lines) == 2*lines_per_frame + 2:
hasbox = False
hasvels = True
elif len(lines) == 2*lines_per_frame + 3:
hasbox = hasvels = True
else:
raise TypeError('Badly formatted restart file. Has %d lines for '
'%d atoms' % (len(lines), natom))
coordinates = np.zeros((1, natom, 3))
if time is None:
time = np.zeros(1)
else:
time = np.asarray((time,))
# Fill the coordinates
for i in range(lines_per_frame):
line = line
|
edx-solutions/edx-platform
|
lms/djangoapps/learner_dashboard/tests/test_utils.py
|
Python
|
agpl-3.0
| 602
| 0.001661
|
"""
Unit test module covering utils module
"""
import ddt
import six
from django.test import TestCase
from lms.djangoapps.learner_dashboard import utils
@ddt.ddt
class TestUtils(TestCase):
"""
The test case class covering the all the utils functions
"""
@ddt.data('path1/', '/path1/path2/', '/', '')
def test_strip_course_id(self, path):
"""
Test to make sure the function 'strip_course_id'
handles various url input
"""
actual = utils.strip_course_id(path + six.text_type(utils.FAKE_COURSE_KEY))
self.assertEqual(actual, path)
|
xorpaul/check_mk
|
web/htdocs/default_permissions.py
|
Python
|
gpl-2.0
| 7,863
| 0.010556
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import config
loaded_with_language = False
# .----------------------------------------------------------------------.
# | ____ _ _
# | | _ \ ___ _ __ _ __ ___ (_)___ ___(_) ___ _ __ ___ |
# | | |_) / _ \ '__| '_ ` _ \| / __/ __| |/ _ \| '_ \/ __| |
# | | __/ __/ | | | | | | | \__ \__ \ | (_) | | | \__ \ |
# | |_| \___|_| |_| |_| |_|_|___/___/_|\___/|_| |_|___/ |
# | |
# +----------------------------------------------------------------------+
# | Declare general permissions for Multisite |
# '----------------------------------------------------------------------'
def load():
global loaded_with_language
if loaded_with_language == current_language:
return
config.declare_permission_section("general", _('General Permissions'), 10)
config.declare_permission("general.use",
_("Use Multisite at all"),
_("Users without this permission are not let in at all"),
[ "admin", "user", "guest" ])
config.declare_permission("general.see_all",
_("See all Nagios objects"),
_("See all objects regardless of contacts and contact groups. "
"If combined with 'perform commands' then commands may be done on all objects."),
[ "admin", "guest" ])
declare_visual_permissions('views', _("views"))
declare_visual_permissions('dashboards', _("dashboards"))
config.declare_permission("general.view_option_columns",
_("Change view display columns"),
_("Interactively change the number of columns being displayed by a view (does not edit or customize the view)"),
[ "admin", "user", "guest" ])
config.declare_permission("general.view_option_refresh",
_("Change view display refresh"),
_("Interactively change the automatic browser reload of a view being displayed (does not edit or customize the view)"),
[ "admin", "user" ])
config.declare_permission("general.painter_options",
_("Change column display options"),
_("Some of the display columns offer options for customizing their output. "
"For example time stamp columns can be displayed absolute, relative or "
"in a mixed style. This permission allows the user to modify display options"),
[ "admin", "user", "guest" ])
config.declare_permission("general.act",
_("Perform commands"),
_("Allows users to perform Nagios commands. If no further permissions "
"are granted, actions can only be done on objects one is a contact for"),
[ "admin", "user" ])
config.declare_permission("general.see_sidebar",
_("Use Check_MK sidebar"),
_("Without this permission the Check_MK sidebar will be invisible"),
[ "admin", "user", "guest" ])
config.declare_permission("general.configure_sidebar",
_("Configure sidebar"),
_("This allows the user to add, move and remove sidebar snapins."),
[ "admin", "user" ])
config.declare_permission('general.edit_profile',
_('Edit the user profile'),
_('Permits the user to change the user profile settings.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.edit_notifications',
_('Edit personal notification settings'),
_('This allows a user to edit his personal notification settings. You also need the permission '
'<i>Edit the user profile</i> in order to do this.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.disable_notifications',
_('Disable all personal notifications'),
_('This permissions provides a checkbox in the personal settings of the user that '
'allows him to completely disable all of his notifications. Use with caution.'),
[ 'admin', ]
)
config.declare_permission('general.edit_user_attributes',
_('Edit personal user attributes'),
_('This allows a user to edit his personal user attributes. You also need the permission '
'<i>Edit the user profile</i> in order to do this.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.change_password',
_('Edit the user password'),
_('Permits the user to change the password.'),
[ 'admin', 'user' ]
)
config.declare_permission('general.logout',
_('Logout'),
_('Permits the user to logout.'),
[ 'admin', 'user', 'guest' ]
)
config.declare_permission("general.ignore_soft_limit",
_("Ignore soft query limit"),
_("Allows to ignore the soft query limit imposed upon the number of datasets returned by a query"),
[ "admin", "user" ])
config.declare_permission("general.ignore_hard_limit",
_("Ignore hard query limit"),
_("Allows to ignore the hard query limit imposed upon the number of datasets returned by a query"),
[ "admin" ])
loaded_with_language = current_language
# TODO: This has been obsoleted by pagetypes.py
def declare_visual_permissions(what, what_plural):
config.declare_permission("general.edit_" + what,
_("Customize %s and use them") % what_plural,
_("Allows to create own %s, customize builtin %s and use them.") % (what_plural, what_plural),
[ "admin", "user" ])
config.declare_permission("general.publish_" + what,
_("Publish %s") % what_plural,
_("Make %s visible and usable for other users.") % what_plural,
[ "admin", "user" ])
config.declare_permission("general.see_user_" + what,
_("See user %s") % what_plural,
_("Is needed for seeing %s that other users have created.") % what_plural,
[ "admin", "user", "guest" ])
config.declare_permission("general.force_" + what,
_("Modify builtin %s") % what_plural,
_("Make own published %s override builtin %s for all users.") % (what_plural, what_plural),
[ "admin" ])
config.declare_permission("general.delete_foreign_" + what,
_("Delete foreign %s") % what_plural,
_("Allows to delete %s created by other users.") % what_plural,
[ "admin" ])
|
inducer/synoptic
|
synoptic/schema_ver_repo/versions/001_Rename_tagset_column.py
|
Python
|
mit
| 467
| 0.006424
|
from __future__ import absolute_import
from sqlalchemy import *
from migrate import *
meta = MetaData()
vieworderings = Table('vieworderings', meta,
Column('id', Integer, primary_key=True),
Column('tagset', Text()),
Column('timestamp', Float, index=True),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
vieworderings.c.tagset.alter(name="norm_query")
def downgrade(migrate_engine):
    raise NotImplementedError
|
ericlyf/screenly-tools-schedulespreadsheet
|
src/model/Asset.py
|
Python
|
gpl-3.0
| 1,601
| 0.009369
|
'''
Created on 11May,2016
@author: linyufeng
'''
from utils.TimeZoneConverter import TimeZoneConverter
class Asset(object):
'''
contain the values will be insert into table Asset
'''
convert = TimeZoneConverter();
def __init__(self, startTime, endTime, directory, fileName, fileType, duration, sequence):
self.startTime = self.convert.victoriaToUCT(startTime)
self.endTime = self.convert.victoriaToUCT(endTime)
self.directory = directory
self.fileName = fileName
self.fileType = fileType
self.duration = int(duration)
self.sequence = int(sequence)
def getStartTime(self):
return self.startTime
    def getEndTime(self):
        return self.endTime
    def getDirectory(self):
return self.directory
def getFileName(self):
return self.fileName
def getFileType(self):
return self.fileType
def getDuration(self):
return self.duration
def getSequence(self):
return self.sequence
def __eq__(self,other):
if isinstance(other, self.__class__):
if self.startTime == other.startTime:
if self.endTime == other.endTime:
if self.directory == other.directory:
if self.duration == other.duration:
if self.fileName == other.fileName:
if self.fileType == other.fileType:
return True
return False
|
duramato/SickRage
|
sickbeard/providers/hdtorrents.py
|
Python
|
gpl-3.0
| 10,280
| 0.00428
|
# Author: Idan Gutman
# Modified by jkaberg, https://github.com/jkaberg for SceneAccess
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import sickbeard
import generic
import urllib
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickrage.helper.exceptions import AuthException
import requests
from BeautifulSoup import BeautifulSoup as soup
from unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from datetime import datetime
import traceback
class HDTorrentsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "HDTorrents")
self.supportsBacklog = True
self.public = False
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.urls = {'base_url': 'https://hd-torrents.org',
'login': 'https://hd-torrents.org/login.php',
'search': 'https://hd-torrents.org/torrents.php?search=%s&active=1&options=0%s',
'rss': 'https://hd-torrents.org/torrents.php?search=&active=1&options=0%s',
'home': 'https://hd-torrents.org/%s'
}
self.url = self.urls['base_url']
self.cache = HDTorrentsCache(self)
self.categories = "&category[]=59&category[]=60&category[]=30&category[]=38"
def isEnabled(self):
return self.enabled
def _checkAuth(self):
if not self.username or not self.password:
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return True
def _doLogin(self):
if any(requests.utils.dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'uid': self.username,
'pwd': self.password,
'submit': 'Confirm'}
response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
if not response:
            logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('You need cookies enabled to log in.', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}
if not self._doLogin():
return results
for mode in search_strings.keys():
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
searchURL = self.urls['search'] % (urllib.quote_plus(search_string.replace('.', ' ')), self.categories)
else:
searchURL = self.urls['rss'] % self.categories
logger.log(u"Search URL: %s" % searchURL, logger.DEBUG)
if mode != 'RSS':
logger.log(u"Search string: %s" % search_string, logger.DEBUG)
data = self.getURL(searchURL)
if not data or 'please try later' in data:
logger.log("No data returned from provider", logger.DEBUG)
continue
html = soup(data)
if not html:
logger.log("No html data parsed from provider", logger.DEBUG)
continue
empty = html.find('No torrents here')
if empty:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
tables = html.find('table', attrs={'class': 'mainblockcontenttt'})
if not tables:
logger.log(u"Could not find table of torrents mainblockcontenttt", logger.ERROR)
continue
torrents = tables.findChildren('tr')
if not torrents:
continue
# Skip column headers
for result in torrents[1:]:
try:
cells = result.findChildren('td', attrs={'class': re.compile(r'(green|yellow|red|mainblockcontent)')})
if not cells:
continue
title = download_url = seeders = leechers = None
size = 0
for cell in cells:
try:
if None is title and cell.get('title') and cell.get('title') in 'Download':
title = re.search('f=(.*).torrent', cell.a['href']).group(1).replace('+', '.')
download_url = self.urls['home'] % cell.a['href']
if None is seeders and cell.get('class')[0] and cell.get('class')[0] in 'green' 'yellow' 'red':
seeders = int(cell.text)
if not seeders:
seeders = 1
elif None is leechers and cell.get('class')[0] and cell.get('class')[0] in 'green' 'yellow' 'red':
leechers = int(cell.text)
if not leechers:
seeders = 0
# Need size for failed downloads handling
if re.match(r'[0-9]+,?\.?[0-9]* [KkMmGg]+[Bb]+', cells[7].text):
size = self._convertSize(cells[7].text)
if not size:
size = -1
except:
logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)
if not all([title, download_url]):
continue
#Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items[mode].append(item)
except (AttributeError, TypeError, KeyError, ValueError):
continue
#For each search mode sort all the items by seeders if available
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def findPropers(self, search_date=datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_ep
|
annarev/tensorflow
|
tensorflow/python/keras/optimizer_v2/nadam.py
|
Python
|
apache-2.0
| 9,337
| 0.002356
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Nadam optimizer implementation."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Nadam')
class Nadam(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the NAdam algorithm.
Much like Adam is essentially RMSprop with momentum, Nadam is Adam with
Nesterov momentum.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
name: Optional name for the operations created when applying gradients.
Defaults to `"Nadam"`.
**kwargs: Keyword arguments. Allowed to be one of
`"clipnorm"` or `"clipvalue"`.
`"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips
gradients by value.
Usage Example:
>>> opt = tf.keras.optimizers.Nadam(learning_rate=0.2)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> "{:.1f}".format(var1.numpy())
9.8
Reference:
- [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
name='Nadam',
**kwargs):
# Backwards compatibility with keras NAdam optimizer.
kwargs['decay'] = kwargs.pop('schedule_decay', 0.004)
learning_rate = kwargs.get('lr', learning_rate)
if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule):
raise ValueError('The Nadam optimizer does not support '
'tf.keras.optimizers.LearningRateSchedules as the '
'learning rate.')
super(Nadam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self._m_cache = None
def _create_slots(self, var_list):
var_dtype = var_list[0].dtype.base_dtype
if self._m_cache is None:
self._m_cache = self.add_weight(
'momentum_cache',
shape=[],
dtype=var_dtype,
initializer='ones',
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._m_cache)
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
# Create slots for the first moments.
self.add_slot(var, 'm')
for var in var_list:
# Create slots for the second moments.
self.add_slot(var, 'v')
def _prepare_local(self, var_device, var_dtype, apply_state):
lr_t = array_ops.identity(self._get_hyper('learning_rate', var_dtype))
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
local_step = math_ops.cast(self.iterations + 1, var_dtype)
next_step = math_ops.cast(self.iterations + 2, var_dtype)
decay_base = math_ops.cast(0.96, var_dtype)
m_t = beta_1_t * (1. - 0.5 * (
math_ops.pow(decay_base, self._initial_decay * local_step)))
m_t_1 = beta_1_t * (1. - 0.5 * (
math_ops.pow(decay_base, self._initial_decay * next_step)))
m_schedule_new = math_ops.cast(self._m_cache_read, var_dtype) * m_t
if var_dtype is self._m_cache.dtype:
m_schedule_new = array_ops.identity(state_ops.assign(
self._m_cache, m_schedule_new, use_locking=self._use_locking))
m_schedule_next = m_schedule_new * m_t_1
apply_state[(var_device, var_dtype)] = dict(
lr_t=lr_t,
neg_lr_t=-lr_t,
epsilon=ops.convert_to_tensor_v2_with_dispatch(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_2_t=beta_2_t,
m_t=m_t,
m_t_1=m_t_1,
one_minus_beta_1_t=1 - beta_1_t,
one_minus_beta_2_t=1 - beta_2_t,
one_minus_m_t=1. - m_t,
one_minus_m_schedule_new=1. - m_schedule_new,
one_minus_m_schedule_next=1. - m_schedule_next,
v_t_prime_denominator=1. - math_ops.pow(beta_2_t, local_step),
)
def _prepare(self, var_list):
# Get the value of the momentum cache before starting to apply gradients.
self._m_cache_read = array_ops.identity(self._m_cache)
return super(Nadam, self)._prepare(var_list)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
g_prime = grad / coefficients['one_minus_m_schedule_new']
m_t = (coefficients['beta_1_t'] * m +
coefficients['one_minus_beta_1_t'] * grad)
m_t = state_ops.assign(m, m_t, use_locking=self._use_locking)
m_t_prime = m_t / coefficients['one_minus_m_schedule_next']
v_t = (coefficients['beta_2_t'] * v +
coefficients['one_minus_beta_2_t'] * math_ops.square(grad))
v_t = state_ops.assign(v, v_t, use_locking=self._use_locking)
v_t_prime = v_t / coefficients['v_t_prime_denominator']
m_t_bar = (coefficients['one_minus_m_t'] * g_prime +
coefficients['m_t_1'] * m_t_prime)
var_t = var - coefficients['lr_t'] * m_t_bar / (
math_ops.sqrt(v_t_prime) + coefficients['epsilon'])
return state_ops.assign(var, var_t, use_locking=self._use_locking).op
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
                    or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
g_prime = grad / coefficients['one_minus_m_schedule_new']
# m_t = beta1 * m + (1 - beta1) * g_t
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
      m_t_slice = array_ops.gather(m_t, indices)
m_t_prime = m_t_slice / coefficients['one_minus_m_schedule_next']
m_t_bar = (coefficients['one_minus_m_t'] * g_prime +
coefficie
|
mbedmicro/pyOCD
|
test/unit/test_notification.py
|
Python
|
apache-2.0
| 4,008
| 0.002745
|
# pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import six
from enum import Enum
from pyocd.utility.notification import (Notification, Notifier)
# Test both int and string events.
EVENT_A = 1
EVENT_B = "foo"
class Subscriber(object):
def __init__(self):
self.was_called = False
self.last_note = None
def cb(self, note):
self.was_called = True
self.last_note = note
@pytest.fixture
def notifier():
return Notifier()
@pytest.fixture
def subscriber():
return Subscriber()
class TestNotification(object):
def test_basic_sub_and_send_a(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.notify(EVENT_A, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_A
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_basic_sub_and_send_b(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_B)
notifier.notify(EVENT_B, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_B
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_unsub(self, notifier, subscriber):
        notifier.subscribe(subscriber.cb, EVENT_A)
notifier.unsubscribe(subscriber.cb)
notifier.notify(EVENT_A, self)
assert not subscriber.was_called
def test_unsub2(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.unsubscribe(subscriber.cb, events=[EVENT_B])
notifier.notify(EVENT_A, self)
assert subscriber.was_called
def test_multiple_sub(self, notifier, subscriber):
        notifier.subscribe(subscriber.cb, (EVENT_A, EVENT_B))
notifier.notify(EVENT_A, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_A
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
notifier.notify(EVENT_B, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_B
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_diff_sub(self, notifier, subscriber):
s2 = Subscriber()
notifier.subscribe(subscriber.cb, EVENT_A)
notifier.subscribe(s2.cb, EVENT_B)
notifier.notify(EVENT_B, self)
assert not subscriber.was_called
assert s2.was_called
assert s2.last_note.event == EVENT_B
def test_src_sub(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A, source=self)
notifier.notify(EVENT_A, self)
assert subscriber.was_called
assert subscriber.last_note.event == EVENT_A
assert subscriber.last_note.source == self
assert subscriber.last_note.data == None
def test_src_sub2(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A, source=self)
notifier.notify(EVENT_A, notifier)
assert not subscriber.was_called
def test_unsub_src(self, notifier, subscriber):
notifier.subscribe(subscriber.cb, EVENT_A, source=self)
notifier.unsubscribe(subscriber.cb)
notifier.notify(EVENT_A, self)
assert not subscriber.was_called
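The tests above double as a compact reference for the Notifier pub/sub API: callbacks subscribe to int or string events, notify() delivers a note carrying event, source and data, and unsubscribe() removes a callback again. A minimal usage sketch built only from the calls exercised in those tests; the event name and callback below are illustrative:
from pyocd.utility.notification import Notifier

EVENT_STARTED = "started"            # events may be plain ints or strings, as in the tests

def on_started(note):
    # each delivered note carries .event, .source and .data, as asserted above
    print("event %s from %r with data %r" % (note.event, note.source, note.data))

notifier = Notifier()
notifier.subscribe(on_started, EVENT_STARTED)
notifier.notify(EVENT_STARTED, notifier)     # source is passed positionally here
notifier.unsubscribe(on_started)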
|
dataplumber/nexus
|
analysis/tests/algorithms/longitudelatitudemap_test.py
|
Python
|
apache-2.0
| 3,692
| 0.003792
|
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import json
import time
import unittest
import urllib
from multiprocessing.pool import ThreadPool
from unittest import skip
from mock import MagicMock
from nexustiles.nexustiles import NexusTileService
from shapely.geometry import box
from tornado.testing import AsyncHTTPTestCase, bind_unused_port
from tornado.web import Application
from NexusHandler import AlgorithmModuleWrapper
from webapp import ModularNexusHandlerWrapper
from webmodel import NexusRequestObject
from webservice.algorithms import LongitudeLatitudeMap
class TestLongitudeLatitudeMap(unittest.TestCase):
def setUp(self):
self.tile_service = NexusTileService()
def test_lin_reg(self):
LongitudeLatitudeMap.tile_service = self.tile_service
print next(
LongitudeLatitudeMap.regression_on_tiles((175.01, -42.68, 180.0, -40.2), box(-180, -90, 180, 90).wkt, 1,
time.time(), "JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1"))
def test_lat_lon_map_driver_mur(self):
# LongitudeLatitudeMap.tile_service = self.tile_service
print next(iter(LongitudeLatitudeMap.lat_lon_map_driver(box(-180, -90, 180, 90), 1, time.time(),
"JPL-L4_GHRSST-SSTfnd-MUR-GLOB-v02.0-fv04.1",
[(175.01, -42.68, 180.0, -40.2)])))
def test_lat_lon_map_driver_ecco(self):
bounding = box(-148, 38, -129, 53)
ds = "MXLDEPTH_ECCO_version4_release1"
start_seconds_from_epoch = 1
end_seconds_from_epoch = time.time()
boxes = self.tile_service.get_distinct_bounding_boxes_in_polygon(bounding, ds,
start_seconds_from_epoch,
end_seconds_from_epoch)
print LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl.results_to_dicts(
LongitudeLatitudeMap.lat_lon_map_driver(bounding, start_seconds_from_epoch, end_seconds_from_epoch, ds,
[a_box.bounds for a_box in boxes]))
class HttpIntegrationTest(unittest.TestCase):
def get_app(self):
path = LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl.path
algorithm = AlgorithmModuleWrapper(LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl)
thread_pool = ThreadPool(processes=1)
return Application(
            [(path, ModularNexusHandlerWrapper, dict(clazz=algorithm, algorithm_config=None, thread_pool=thread_pool))],
default_host=bind_unused_port()
)
# @skip("Integration test only. Works only if you have Solr and Cassandra running locally with data ingested")
def test_integration_all_in_tile(self):
        def get_argument(*args, **kwargs):
params = {
"ds": "MXLDEPTH_ECCO_version4_release1",
"minLon": "-45",
"minLat": "0",
"maxLon": "0",
"maxLat": "45",
"startTime": "1992-01-01T00:00:00Z",
"endTime": "2016-12-01T00:00:00Z"
}
return params[args[0]]
request_handler_mock = MagicMock()
request_handler_mock.get_argument.side_effect = get_argument
request = NexusRequestObject(request_handler_mock)
handler_impl = LongitudeLatitudeMap.LongitudeLatitudeMapHandlerImpl()
response = handler_impl.calc(request)
print response.toJson()
|
goirijo/thermoplotting
|
old/testing/dataset/rough_cooling_nuke_0/cool.py
|
Python
|
mit
| 472
| 0.027542
|
import glob
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
dirlist=glob.glob("./mu-*.*")
fig=plt.figure()
ax=fig.add_subplot(111)
for run in dirlist:
    rundata=np.loadtxt(run+"/debugout/tabulated_averages.txt")
x=rundata[:,4]/(rundata[:,3]+rundata[:,4])
y=rundata[:,6]
ax.scatter(x,y)
ax.set_xlabel(r"\textbf{x$_{\mathrm{Al}}$}")
ax.set_ylabel(r"\textbf{T [K]}")
plt.show()
|
olafhauk/mne-python
|
mne/preprocessing/_csd.py
|
Python
|
bsd-3-clause
| 6,319
| 0
|
# Copyright 2003-2010 Jürgen Kayser <rjk23@columbia.edu>
# Copyright 2017 Federico Raimondo <federaimondo@gmail.com> and
# Denis A. Engemann <dengemann@gmail.com>
#
#
# The original CSD Toolbox can be find at
# http://psychophysiology.cpmc.columbia.edu/Software/CSDtoolbox/
# Authors: Denis A. Engeman <denis.engemann@gmail.com>
# Alex Rockhill <aprockhill@mailbox.org>
#
# License: Relicensed under BSD (3-clause) and adapted with
# permission from authors of original GPL code
import numpy as np
from scipy import linalg
from .. import pick_types
from ..utils import _validate_type, _ensure_int, _check_preload
from ..io import BaseRaw
from ..io.constants import FIFF
from ..epochs import BaseEpochs
from ..evoked import Evoked
from ..bem import fit_sphere_to_headshape
from ..channels.interpolation import _calc_g, _calc_h
def _prepare_G(G, lambda2):
G.flat[::len(G) + 1] += lambda2
# compute the CSD
Gi = linalg.inv(G)
TC = Gi.sum(0)
sgi = np.sum(TC) # compute sum total
return Gi, TC, sgi
def _compute_csd(G_precomputed, H, radius):
"""Compute the CSD."""
n_channels = H.shape[0]
data = np.eye(n_channels)
mu = data.mean(0)
Z = data - mu
Gi, TC, sgi = G_precomputed
Cp2 = np.dot(Gi, Z)
c02 = np.sum(Cp2, axis=0) / sgi
C2 = Cp2 - np.dot(TC[:, np.newaxis], c02[np.newaxis, :])
X = np.dot(C2.T, H).T / radius ** 2
return X
def compute_current_source_density(inst, sphere='auto', lambda2=1e-5,
stiffness=4, n_legendre_terms=50,
copy=True):
"""Get the current source density (CSD) transformation.
Transformation based on spherical spline surface Laplacian
:footcite:`PerrinEtAl1987,PerrinEtAl1989,Cohen2014,KayserTenke2015`.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The data to be transformed.
sphere : array-like, shape (4,) | str
The sphere, head-model of the form (x, y, z, r) where x, y, z
is the center of the sphere and r is the radius in meters.
Can also be "auto" to use a digitization-based fit.
lambda2 : float
Regularization parameter, produces smoothness. Defaults to 1e-5.
stiffness : float
Stiffness of the spline.
n_legendre_terms : int
Number of Legendre terms to evaluate.
copy : bool
Whether to overwrite instance data or create a copy.
Returns
-------
inst_csd : instance of Raw, Epochs or Evoked
The transformed data. Output type will match input type.
Notes
-----
This function applies an average reference to the data if copy is False.
Do not transform CSD data to source space.
.. versionadded:: 0.20
References
----------
.. footbibliography::
"""
_validate_type(inst, (BaseEpochs, BaseRaw, Evoked), 'inst')
_check_preload(inst, 'Computing CSD')
if inst.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_CSD:
raise ValueError('CSD already applied, should not be reapplied')
inst = inst.copy() if copy else inst
picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
if any([ch in np.array(inst.ch_names)[picks] for ch in inst.info['bads']]):
raise ValueError('CSD cannot be computed with bad EEG channels. Either'
' drop (inst.drop_channels(inst.info[\'bads\']) '
'or interpolate (`inst.interpolate_bads()`) '
'bad EEG channels.')
if len(picks) == 0:
        raise ValueError('No EEG channels found.')
_validate_type(lambda2, 'numeric', 'lambda2')
if not 0 <= lambda2 < 1:
raise ValueError('lambda2 must be between 0 and 1, got %s' % lambda2)
_validate_type(stiffness, 'numeric', 'stiffness')
if stiffness < 0:
raise ValueError('stiffness must be non-negative got %s' % stiffness)
n_legendre_terms = _ensure_int(n_legendre_terms, 'n_legendre_terms')
if n_legendre_terms < 1:
raise ValueError('n_legendre_terms must be greater than 0, '
'got %s' % n_legendre_terms)
if isinstance(sphere, str) and sphere == 'auto':
radius, origin_head, origin_device = fit_sphere_to_headshape(inst.info)
x, y, z = origin_head - origin_device
sphere = (x, y, z, radius)
try:
sphere = np.array(sphere, float)
x, y, z, radius = sphere
except Exception:
raise ValueError(
f'sphere must be "auto" or array-like with shape (4,), '
f'got {sphere}')
_validate_type(x, 'numeric', 'x')
_validate_type(y, 'numeric', 'y')
_validate_type(z, 'numeric', 'z')
_validate_type(radius, 'numeric', 'radius')
if radius <= 0:
raise ValueError('sphere radius must be greater than 0, '
'got %s' % radius)
_validate_type(copy, (bool), 'copy')
pos = np.array([inst.info['chs'][pick]['loc'][:3] for pick in picks])
if not np.isfinite(pos).all() or np.isclose(pos, 0.).all(1).any():
raise ValueError('Zero or infinite position found in chs')
pos -= (x, y, z)
# Project onto a unit sphere to compute the cosine similarity:
pos /= np.linalg.norm(pos, axis=1, keepdims=True)
cos_dist = np.clip(np.dot(pos, pos.T), -1, 1)
# This is equivalent to doing one minus half the squared Euclidean:
# from scipy.spatial.distance import squareform, pdist
# cos_dist = 1 - squareform(pdist(pos, 'sqeuclidean')) / 2.
del pos
G = _calc_g(cos_dist, stiffness=stiffness,
n_legendre_terms=n_legendre_terms)
H = _calc_h(cos_dist, stiffness=stiffness,
n_legendre_terms=n_legendre_terms)
G_precomputed = _prepare_G(G, lambda2)
trans_csd = _compute_csd(G_precomputed=G_precomputed,
H=H, radius=radius)
epochs = [inst._data] if not isinstance(inst, BaseEpochs) else inst._data
for epo in epochs:
epo[picks] = np.dot(trans_csd, epo[picks])
inst.info['custom_ref_applied'] = FIFF.FIFFV_MNE_CUSTOM_REF_CSD
for pick in picks:
inst.info['chs'][pick].update(coil_type=FIFF.FIFFV_COIL_EEG_CSD,
unit=FIFF.FIFF_UNIT_V_M2)
return inst
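A minimal usage sketch of the transform documented above, assuming the usual public import path mne.preprocessing.compute_current_source_density and a placeholder file name; as the checks in the function require, the data must be preloaded and bad EEG channels dropped or interpolated first:
import mne
from mne.preprocessing import compute_current_source_density  # assumed public path

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # placeholder file
raw_csd = compute_current_source_density(raw, sphere="auto", lambda2=1e-5,
                                         stiffness=4, n_legendre_terms=50)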
|
jboes/jasp
|
jasp/volumetric_data.py
|
Python
|
gpl-2.0
| 5,279
| 0.001137
|
"""Module for reading volumetric data from VASP calculations.
Charge density and dipole moment
Local potential
Electron localization function
"""
import os
import numpy as np
from ase.calculators.vasp import Vasp, VaspChargeDensity
from POTCAR import get_ZVAL
def get_volumetric_data(self, filename='CHG', **kwargs):
"""Read filename to read the volumetric data in it.
Supported filenames are CHG, CHGCAR, and LOCPOT.
"""
atoms = self.get_atoms()
vd = VaspChargeDensity(filename)
data = np.array(vd.chg)
n0, n1, n2 = data[0].shape
s0 = np.linspace(0, 1, num=n0, endpoint=False)
s1 = np.linspace(0, 1, num=n1, endpoint=False)
s2 = np.linspace(0, 1, num=n2, endpoint=False)
X, Y, Z = np.meshgrid(s0, s1, s2)
C = np.column_stack([X.ravel(),
Y.ravel(),
Z.ravel()])
uc = atoms.get_cell()
real = np.dot(C, uc)
# now convert arrays back to unitcell shape
x = np.reshape(real[:, 0], (n0, n1, n2))
y = np.reshape(real[:, 1], (n0, n1, n2))
z = np.reshape(real[:, 2], (n0, n1, n2))
return x, y, z, data
def get_charge_density(self, spin=0, filename='CHG'):
"""Returns x, y, and z coordinate and charge density arrays.
Supported file formats: CHG, CHGCAR
:param int spin: an integer
:returns: x, y, z, charge density arrays
:rtype: 3-d numpy arrays
Relies on :func:`ase.calculators.vasp.VaspChargeDensity`.
"""
x, y, z, data = get_volumetric_data(self, filename=filename)
return x, y, z, data[spin]
Vasp.get_charge_density = get_charge_density
def get_local_potential(self):
"""Returns x, y, z, and local potential arrays
is there a spin for this?
We multiply the data by the volume because we are reusing the
charge density code which divides by volume.
"""
x, y, z, data = get_volumetric_data(self, filename='LOCPOT')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_local_potential = get_local_potential
def get_elf(self):
"""Returns x, y, z and electron localization function arrays."""
x, y, z, data = get_volumetric_data(self, filename='ELFCAR')
atoms = self.get_atoms()
return x, y, z, data[0] * atoms.get_volume()
Vasp.get_elf = get_elf
def get_electron_density_center(self, spin=0, scaled=True):
"""Returns center of electron density.
If scaled, use scaled coordinates, otherwise use cartesian
coordinates.
"""
atoms = self.get_atoms()
x, y, z, cd = self.get_charge_density(spin)
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = cd.sum() * voxel_volume
electron_density_center = np.array([(cd * x).sum(),
(cd * y).sum(),
(cd * z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
if scaled:
uc = atoms.get_cell()
return np.dot(np.linalg.inv(uc.T), electron_density_center.T).T
else:
return electron_density_center
def get_dipole_moment(self, atoms=None):
"""Tries to return the dipole vector of the unit cell in atomic units.
Returns None when CHG file is empty/not-present.
To get the dipole moment, use this formula:
dipole_moment = ((dipole_vector**2).sum())**0.5/Debye
"""
if atoms is None:
atoms = self.get_atoms()
try:
x, y, z, cd = self.get_charge_density()
except (IOError, IndexError):
# IOError: no CHG file, function called outside context manager
# IndexError: Empty CHG file, Vasp run with lcharg=False
return None
n0, n1, n2 = cd.shape
nelements = n0 * n1 * n2
voxel_volume = atoms.get_volume() / nelements
total_electron_charge = -cd.sum() * voxel_volume
electron_density_center = np.array([(cd*x).sum(),
(cd*y).sum(),
(cd*z).sum()])
electron_density_center *= voxel_volume
electron_density_center /= total_electron_charge
electron_dipole_moment = electron_density_center * total_electron_charge
electron_dipole_moment *= -1.0
# now the ion charge center
LOP = self.get_pseudopotentials()
ppp = os.environ['VASP_PP_PATH']
# make dictionary for ease of use
zval = {}
for sym, ppath, hash in LOP:
# out a bug above. os.path.join discards the root if the
# second path starts with /, which makes it look like an
# absolute path. the get_pseudopote
|
ntials code returns a path
# with a / in the beginning.
fullpath = ppp + ppath
|
z = get_ZVAL(fullpath)
zval[sym] = z
ion_charge_center = np.array([0.0, 0.0, 0.0])
total_ion_charge = 0.0
for atom in atoms:
Z = zval[atom.symbol]
total_ion_charge += Z
pos = atom.position
ion_charge_center += Z*pos
ion_charge_center /= total_ion_charge
ion_dipole_moment = ion_charge_center * total_ion_charge
dipole_vector = (ion_dipole_moment + electron_dipole_moment)
return dipole_vector
Vasp.get_dipole_moment = get_dipole_moment
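The get_dipole_moment docstring gives the scalar moment as ((dipole_vector**2).sum())**0.5 / Debye. A short sketch of that conversion; the with-block calculator usage and the calculation directory are assumptions about typical jasp usage rather than something defined in this module:
from ase.units import Debye
from jasp import jasp  # assumed entry point for the calculator context manager

with jasp("molecules/co-centered") as calc:  # hypothetical calculation directory
    dipole_vector = calc.get_dipole_moment()

if dipole_vector is not None:  # None when the CHG file is missing or empty
    dipole_moment = ((dipole_vector ** 2).sum()) ** 0.5 / Debye
    print("dipole moment: %.3f Debye" % dipole_moment)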
|
coderbone/SickRage-alt
|
sickchill/views/config/anime.py
|
Python
|
gpl-3.0
| 2,470
| 0.001619
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, print_function, unicode_literals
# Stdlib Imports
import os
# Third Party Imports
from tornado.web import addslash
# First Party Imports
import sickbeard
from sickbeard import config, filters, ui
from sickchill.helper.encoding import ek
from sickchill.views.common import PageTemplate
from sickchill.views.routes import Route
# Local Folder Imports
from .index import Config
@Route('/config/anime(/?.*)', name='config:anime')
class ConfigAnime(Config):
def __init__(self, *args, **kwargs):
super(ConfigAnime, self).__init__(*args, **kwargs)
@addslash
def index(self, *args_, **kwargs_):
t = PageTemplate(rh=self, filename="config_anime.mako")
return t.render(submenu=self.ConfigMenu(), title=_('Config - Anime'),
header=_('Anime'), topmenu='config',
controller="config", action="anime")
def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, anidb_use_mylist=None,
split_home=None, split_home_in_tabs=None):
sickbeard.USE_ANIDB = config.checkbox_to_value(use_anidb)
sickbeard.ANIDB_USERNAME = anidb_username
sickbeard.ANIDB_PASSWORD = filters.unhide(sickbeard.ANIDB_PASSWORD, anidb_password)
sickbeard.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist)
sickbeard.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home)
sickbeard.ANIME_SPLIT_HOME_IN_TABS = config.checkbox_to_value(split_home_in_tabs)
sickbeard.save_config()
ui.notifications.message(_('Configuration Saved'), ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/anime/")
|
icoxfog417/pyfbi
|
tests/demo.py
|
Python
|
mit
| 404
| 0.007426
|
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import pyfbi
@pyfbi.target
def func1():
time.sleep(1)
def func2():
time.sleep(2)
@pyfbi.target
def func3():
time.sleep(3)
with pyfbi.watch():
[f() for f in (func1, func2, func3)]
pyfbi.show()
with pyfbi.watch(global_watch=True):
[f() for f in (func1, func2, func3)]
pyfbi.show()
|
illfelder/compute-image-packages
|
packages/python-google-compute-engine/setup.py
|
Python
|
apache-2.0
| 3,041
| 0.002302
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a Python package of the Linux guest environment."""
import glob
import sys
import setuptools
install_requires = ['setuptools']
if sys.version_info < (3, 0):
install_requires += ['boto']
if sys.version_info >= (3, 7):
install_requires += ['distro']
setuptools.setup(
author='Google Compute Engine Team',
author_email='gc-team@google.com',
description='Google Compute Engine',
    include_package_data=True,
install_requires=install_requires,
    license='Apache Software License',
long_description='Google Compute Engine guest environment.',
name='google-compute-engine',
packages=setuptools.find_packages(),
url='https://github.com/GoogleCloudPlatform/compute-image-packages',
version='20191112.0',
# Entry points create scripts in /usr/bin that call a function.
entry_points={
'console_scripts': [
'google_accounts_daemon=google_compute_engine.accounts.accounts_daemon:main',
'google_clock_skew_daemon=google_compute_engine.clock_skew.clock_skew_daemon:main',
'google_instance_setup=google_compute_engine.instance_setup.instance_setup:main',
'google_network_daemon=google_compute_engine.networking.network_daemon:main',
'google_metadata_script_runner=google_compute_engine.metadata_scripts.script_manager:main',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
],
)
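The entry_points comment above is the key mechanism here: on install, setuptools generates one wrapper executable per console_scripts entry. For the instance-setup entry, the generated script behaves roughly like the sketch below (an illustration of the mechanism, not a file shipped in this package):
import sys
from google_compute_engine.instance_setup.instance_setup import main

if __name__ == '__main__':
    sys.exit(main())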
|
utmi-2014/utmi-soft3
|
beginner_tutorials/scripts/simple_action_client.py
|
Python
|
mit
| 582
| 0.024055
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('beginner_tutorials')
import rospy
import actionlib
from beginner_tutorials.msg import *
if __name__ == '__main__':
rospy.init_node('do_dishes_client')
client = actionlib.SimpleActionClient('do_dishes', DoDishesAction)
client.wait_for_server()
goal = DoDishesGoal()
goal.dishwasher_id = 1
print "Requesting dishwasher %d"%(goal.dishwasher_id)
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(5.0))
    result = client.get_result()
print "Resulting dishwasher %d"%(result.total_dishes_cleaned)
|
rbuffat/pyidf
|
tests/test_setpointmanagermultizonecoolingaverage.py
|
Python
|
apache-2.0
| 2,146
| 0.003728
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.setpoint_managers import SetpointManagerMultiZoneCoolingAverage
log = logging.getLogger(__name__)
class TestSetpointManagerMultiZoneCoolingAverage(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_setpointmanagermultizonecoolingaverage(self):
pyidf.validation_level = ValidationLevel.error
obj = SetpointManagerMultiZoneCoolingAverage()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_hvac_air_loop_name = "object-list|HVAC Air Loop Name"
obj.hvac_air_loop_name = var_hvac_air_loop_name
# real
var_minimum_setpoint_temperature = 0.0001
obj.minimum_setpoint_temperature = var_minimum_setpoint_temperature
# real
var_maximum_setpoint_temperature = 0.0001
obj.maximum_setpoint_temperature = var_maximum_setpoint_temperature
# node
var_setpoint_node_or_nodelist_name = "node|Setpoint Node or NodeList Name"
obj.setpoint_node_or_nodelist_name = var_setpoint_node_or_nodelist_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
        idf2 = IDF(self.path)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].name, var_name)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].hvac_air_loop_name, var_hvac_air_loop_name)
self.assertAlmostEqual(idf2.setpointmanagermultizonecoolingaverages[0].minimum_setpoint_temperature, var_minimum_setpoint_temperature)
        self.assertAlmostEqual(idf2.setpointmanagermultizonecoolingaverages[0].maximum_setpoint_temperature, var_maximum_setpoint_temperature)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].setpoint_node_or_nodelist_name, var_setpoint_node_or_nodelist_name)
|
badbytes/pymeg
|
pdf2py/el.py
|
Python
|
gpl-3.0
| 2,695
| 0.017811
|
try:from scipy.io.numpyio import *
except ImportError: from extra.numpyio import *
import os
from time import strftime
import shutil
class getpoints:
def __init__(self, elfile):
datetime = strftime("%Y-%m-%d %H:%M:%S").replace(' ', '_')
self.elfile = elfile
if os.path.isfile(elfile) == True:
print 'step 1: is file.'
#if os.path.isfile(elfile) == True:
# print 'detecting previous attempted fix'
shutil.copy(elfile, elfile+datetime)
fileopen = open(elfile, 'r')
fileopen.seek(0, os.SEEK_SET) #24bytes
self.lpa = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(64, os.SEEK_SET) #24bytes
self.rpa = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(128, os.SEEK_SET) #24bytes
self.nas = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(192, os.SEEK_SET) #24bytes
self.cz = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(256, os.SEEK_SET) #24bytes
self.ini = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(320, os.SEEK_SET) #24bytes
self.coil1 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(384, os.SEEK_SET) #24bytes
self.coil2 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(448, os.SEEK_SET) #24bytes
self.coil3 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(512, os.SEEK_SET) #24bytes
self.coil4 = fread(fileopen, 3, 'd', 'd', 0)
fileopen.seek(576, os.SEEK_SET)
self.coil5 = fread(fileopen, 3, 'd', 'd', 0)
class read(getpoints):
def write(self):
filewrite = open(self.elfile, 'r+')
filewrite.seek(0, os.SEEK_SET) #24bytes
        fwrite(filewrite, 3, self.lpa, 'd', 1)
filewrite.seek(64, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.rpa, 'd', 1)
filewrite.seek(128, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.nas, 'd', 1)
filewrite.seek(192, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.cz, 'd', 1)
filewrite.seek(256, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.ini, 'd', 1)
filewrite.seek(320, os.SEEK_SET) #24bytes
        fwrite(filewrite, 3, self.coil1, 'd', 1)
filewrite.seek(384, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil2, 'd', 1)
filewrite.seek(448, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil3, 'd', 1)
filewrite.seek(512, os.SEEK_SET) #24bytes
fwrite(filewrite, 3, self.coil4, 'd', 1)
filewrite.seek(576, os.SEEK_SET)
fwrite(filewrite, 3, self.coil5, 'd', 1)
print 'step two: finished fixing byte swap'
|
scorphus/thefuck
|
thefuck/rules/tsuru_not_command.py
|
Python
|
mit
| 527
| 0
|
import re
from thefuck.utils import get_all_matched_commands, replace_command, for_app
@for_app('tsuru')
def match(command):
return (' is not a tsuru command. See "tsuru help".' in command.output
and '\nDid you mean?\n\t' in command.output)
def get_new_command(command):
    broken_cmd = re.findall(r'tsuru: "([^"]*)" is not a tsuru command',
command.output)[0]
return replace_command(command, broken_cmd,
get_all_matched_commands(command.output))
|
beppec56/core
|
wizards/com/sun/star/wizards/agenda/AgendaWizardDialogResources.py
|
Python
|
gpl-3.0
| 14,293
| 0.004128
|
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
class AgendaWizardDialogResources(object):
RID_AGENDAWIZARDDIALOG_START = 5000
RID_AGENDAWIZARDROADMAP_START = 5049
RID_COMMON_START = 500
SECTION_ITEMS = "AGENDA_ITEMS"
SECTION_TOPICS = "AGENDA_TOPICS"
SECTION_MINUTES_ALL = "MINUTES_ALL"
SECTION_MINUTES = "MINUTES"
def __init__(self, oWizardResource):
self.resAgendaWizardDialog_title = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 1)
self.resoptMakeChanges_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 2)
self.reslblTemplateName_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 3)
self.reslblTemplatePath_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 4)
self.reslblProceed_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 5)
self.reslblTitle1_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 6)
self.reslblTitle3_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 7)
self.reslblTitle2_value = oWizardResource.getResText(
            AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 8)
self.reslblTitle4_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 9)
self.reslblTitle5_value = oWizardResource.getResText(
            AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 10)
self.reslblTitle6_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 11)
self.reschkMinutes_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 12)
self.reslblHelp1_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 13)
self.reslblTime_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 14)
self.reslblTitle_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 15)
self.reslblLocation_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 16)
self.reslblHelp2_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 17)
self.resbtnTemplatePath_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 18)
self.resoptCreateAgenda_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 19)
self.reslblHelp6_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 20)
self.reslblTopic_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 21)
self.reslblResponsible_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 22)
self.reslblDuration_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 23)
self.reschkConvenedBy_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 24)
self.reschkPresiding_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 25)
self.reschkNoteTaker_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 26)
self.reschkTimekeeper_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 27)
self.reschkAttendees_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 28)
self.reschkObservers_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 29)
self.reschkResourcePersons_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 30)
self.reslblHelp4_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 31)
self.reschkMeetingTitle_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 32)
self.reschkRead_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 33)
self.reschkBring_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 34)
self.reschkNotes_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 35)
self.reslblHelp3_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 36)
self.reslblDate_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 38)
self.reslblHelpPg6_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 39)
self.reslblPageDesign_value = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 40)
self.resDefaultFilename = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 41)
self.resDefaultFilename = self.resDefaultFilename[:-4] + ".ott"
self.resDefaultTitle = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 42)
self.resErrSaveTemplate = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 43)
self.resPlaceHolderTitle = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 44)
self.resPlaceHolderDate = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 45)
self.resPlaceHolderTime = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 46)
self.resPlaceHolderLocation = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 47)
self.resPlaceHolderHint = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 48)
self.resErrOpenTemplate = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 56)
self.itemMeetingType = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 57)
self.itemBring = oWizardResource.getResText(
AgendaWizardDialogResources.RID_AGENDAWIZARDDIALOG_START + 58)
self.itemRead = oWizardResource.getResText(
A
|
TomWerner/AlumniMentoring
|
mentoring/migrations/0012_auto_20161027_1700.py
|
Python
|
mit
| 5,231
| 0.001529
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-27 22:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mentoring', '0011_auto_20161027_1653'),
]
operations = [
migrations.AlterField(
model_name='menteepreference',
name='first_choice',
field=models.CharField(choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1),
),
migrations.AlterField(
model_name='menteepreference',
name='preferred_communication',
field=models.CharField(choices=[('1', 'In Person'), ('2', 'Phone'), ('3', 'Email'), ('4', 'Other')], max_length=1),
),
migrations.AlterField(
model_name='menteepreference',
name='second_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
migrations.AlterField(
model_name='menteepreference',
name='third_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
migrations.AlterField(
model_name='mentorpreference',
name='first_choice',
field=models.CharField(choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1),
),
migrations.AlterField(
model_name='mentorpreference',
name='preferred_communication',
field=models.CharField(choices=[('1', 'In Person'), ('2', 'Phone'), ('3', 'Email'), ('4', 'Other')], max_length=1),
),
migrations.AlterField(
model_name='mentorpreference',
name='second_choice',
field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
migrations.AlterField(
model_name='mentorpreference',
name='third_choice',
            field=models.CharField(blank=True, choices=[('1', 'Choice of Major'), ('2', 'Academia or Industry'), ('3', 'Resume/CV Critique'), ('4', 'Parenting vs Career'), ('5', 'Work life balance'), ('6', 'Life after Iowa'), ('7', 'Study Abroad'), ('8', 'International Experience'), ('9', 'Fellowships'), ('10', 'Goals'), ('11', 'Shadowing Opportunities'), ('12', 'Grad school applications'), ('13', 'Med school applications'), ('14', 'Job/Internship search'), ('15', 'Networking'), ('16', 'Advanced degrees'), ('17', 'Workplace issues'), ('18', 'Personal Experiences'), ('19', 'Gender specific')], max_length=1, null=True),
),
]
|
stevarino/cmsc495
|
mac_app/migrations/0001_initial.py
|
Python
|
mit
| 4,401
| 0.004317
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-13 18:29
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mac_app.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Department',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(default='corp', max_length=128)),
('firstname', models.CharField(blank=True, max_length=128)),
('lastname', models.CharField(blank=True, max_length=128)),
('address', models.CharField(blank=True, max_length=256)),
('city', models.CharField(blank=True, max_length=128)),
('state', models.CharField(blank=True, max_length=128)),
('postal_code', models.CharField(blank=True, max_length=16)),
('phone', models.CharField(blank=True, max_length=16)),
('department', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='mac_app.Department')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.CharField(default=mac_app.models.get_new_ticket_number, max_length=32)),
('creation_date', models.DateTimeField(default=datetime.datetime.now, verbose_name='date created')),
('dsk_stage', models.IntegerField(default=0)),
('net_stage', models.IntegerField(default=0)),
('fac_stage', models.IntegerField(default=0)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets_started', to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TicketNote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation_date', models.DateTimeField(default=datetime.datetime.now, verbose_name='date created')),
('content', models.TextField(blank=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mac_app.Ticket')),
],
),
migrations.CreateModel(
name='TicketType',
fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=16)),
('dsk_seq', models.IntegerField(default=0, verbose_name='Desktop Sequence')),
('dsk_msg', models.TextField(verbose_name='Desktop Message')),
('net_seq', models.IntegerField(default=0, verbose_name='Network Sequence')),
('net_msg', models.TextField(verbose_name='Network Message')),
('fac_seq', models.IntegerField(default=0, verbose_name='Facilities Sequence')),
('fac_msg', models.TextField(verbose_name='Facilities Message')),
],
),
migrations.AddField(
model_name='ticket',
name='ticket_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mac_app.TicketType'),
),
]
|
merriam/dectools
|
dectools/test/test_make_call_if.py
|
Python
|
mit
| 5,089
| 0.005699
|
import dectools.dectools as dectools
from print_buffer import print_buffer
p = print_buffer()
prnt = p.rint
printed = p.rinted
prnt("Testing the @dectools.make_call_if decorator")
prnt("==================")
prnt("*No additonal parameters...")
@dectools.make_call_if
def check_security(function, args, kwargs):
prnt("I trust you to run function", function.__name__)
return True
@check_security
def add_two(first, second):
prnt("Adding", first, "and", second)
return first + second
result = add_two(1, 2)
printed("I trust you to run function add_two", back=-2)
printed("Adding 1 and 2")
prnt("1+2=", result)
printed("1+2= 3")
prnt("==================")
prnt("Example storing data in the function itself. Watch out for __slots__")
@dectools.make_call_if
def limit(function, args, kwargs, maximum_calls=10):
""" You may only call some number of times """
if hasattr(function, "__limit_calls__"):
called = function.__limit_calls__ + 1
else:
called = 1
function.__limit_calls__ = called
if called > maximum_calls:
prnt("calls exceeded. denied.")
return False
else:
prnt("called", called, "times. ", maximum_calls - called, "remaining.")
return True
@limit(2)
def hello():
prnt("hello")
hello()
printed("called 1 times. 1 remaining.", back=-2)
printed("hello")
hello()
printed("called 2 times. 0 remaining.", back=-2)
printed("hello")
hello()
printed("calls exceeded. denied.")
hello()
printed("calls exceeded. denied.")
prnt("==================")
prnt("*Extra parameters checked/ripped by the decorator")
@dectools.make_call_if
def security_level(function, args, kwargs, level):
prnt("You are level", level)
if level == "admin":
return True
elif "override_security" in kwargs:
del kwargs['override_security']
return True
else:
return False
@security_level("admin")
def add_three(first, second, third):
prnt("adding", first, "+", second, "+", third)
return first + second + third
result = add_three(1, 2, 3)
prnt("1+2+3 =", result)
@security_level("user")
def subtract_two(first, second):
prnt("subtracting ", first, "-", second)
return first - second
result = subtract_two(3, 2)
prnt("3-2=", result)
prnt("*ripping out an argument in passing")
@security_level("user")
def one():
prnt("one")
@security_level("user")
def two(**kwargs):
assert not kwargs
prnt("You are new number 2.")
one()
printed("You are level user")
prnt("meaning it failed security and did not print one")
try:
one(override_security=True)
except TypeError:
prnt("I used to be able to do that - Now I use signature preserving functions.")
prnt("one() takes no parameters")
printed("one() takes no parameters")
two(override_security=True)
printed("You are new number 2.")
prnt("That can work however, because two() takes arbitrary parameters.")
prnt("meaning the decorator took a parameter from the call, acted on it, and removed it from the call.")
prnt("==================")
prnt("*Example of relying on a global")
features = ["general", "print", "email", "twitter"]
@dectools.make_call_if
def is_feature_installed(function, args, kwargs, feature="general"):
global features
prnt("checking feature", feature)
if feature in features:
features.remove(feature)
return True
else:
return False
@is_feature_installed()
def general_stuff():
prnt("general stuff")
general_stuff()
printed("checking feature general", -2)
printed("general stuff")
general_stuff()
printed("checking feature general")
@is_feature_installed("facebook")
def post_to_facebook(account, password):
prnt("posting now")
post_to_facebook("me", "password")
printed("checking feature facebook")
prnt("Now update the global")
features = ["general", "print", "email", "twitter", "facebook"]
post_to_facebook("you", "123")
printed("checking feature facebook", -2)
printed("posting now")
prnt("==================")
prnt("Fun with bad usage")
@is_feature_installed
def forgot_to_use_parens_there():
pass
try:
forgot_to_use_parens_there()
except TypeError as te:
prnt(te[0])
assert "parenthesis" in te[0]
prnt("At least there is a hint.")
printed("At least there is a hint.")
try:
@dectools.call_if(is_feature_installed, feature = "facebook")
def it_is_a_decorator_not_a_mold():
pass
except AssertionError as ae:
prnt(ae[0])
assert "already a decorator" in ae[0]
prnt("At least there is a hint.")
printed("At least there is a hint.")
try:
@check_security()
def that_takes_no_parameters():
pass
except TypeError as te:
prnt(te[0])
assert "parenthesis" in te[0]
prnt("At least there is a hint.")
printed("At least there is a hint.")
try:
@check_security('f')
def that_takes_no_parameters():
pass
except AssertionError as ae:
prnt(ae[0])
assert "type" in ae[0]
prnt("Not a good hint I grant.")
prnt("At least there is a hint.")
printed("At least there is a hint.")
prnt("All done")
|
petersilva/metpx-sarracenia
|
sarra/plugins/root_chown.py
|
Python
|
gpl-2.0
| 4,501
| 0.037325
|
#!/usr/bin/python3
"""
This plugin can be used to add the ownership and group of a file to a post message (on_post)
and to change the owner/group of products at the destination (on_file).
Sample usage:
plugin root_chown.py
Options
-------
If users/groups differ between the source and the destination, the user can supply a mapping file
which associates SRC_UG with DEST_UG. The file path is given as an absolute path with the
option 'mapping_file'. The default value is None, which means ownership is set as for the source user/group.
The 'mapping_file' format is simply one line per owner/group mapping:
aspymjg:cidx mjg777:ssc_di
here aspymjg:cidx would be the source ownership (source user:group)
and mjg777:ssc_di the destination ownership (destination user:group)
"""
import grp,os,pwd
class ROOT_CHOWN(object):
def __init__(self,parent):
parent.declare_option( 'mapping_file' )
self.mapping = {}
def on_start(self,parent):
logger = parent.logger
if not hasattr( parent, "mapping_file" ):
parent.mapping_file = [ None ]
return True
mf_path = parent.mapping_file[0]
try:
f = open(mf_path,'r')
while True:
l = f.readline()
if not l : break
l2 = l.strip()
parts = l2.split()
if len(parts) != 2 :
logger.error("wrong mapping line %s" % l)
continue
self.mapping[parts[0]] = parts[1]
f.close()
logger.info( "ROOT_CHOWN mapping_file loaded %s" % mf_path)
except: logger.error("ROOT_CHOWN problem when parsing %s" % mf_path)
return True
def on_post(self,parent):
import grp,os,pwd
logger = parent.logger
msg = parent.msg
logger.debug("ROOT_CHOWN on_post")
new_dir = parent.new_dir
new_file = parent.new_file
# if remove ...
if msg.headers['sum'].startswith('R,') and not 'newname' in msg.headers: return True
# if move ... sr_watch sets new_dir new_file on destination file so we are ok
# already set ... check for mapping switch
if 'ownership' in msg.headers :
ug = msg.headers['ownership']
if ug in self.mapping :
logger.debug("ROOT_CHOWN mapping from %s to %s" % (ug,self.mapping[ug]))
msg.headers['ownership'] = self.mapping[ug]
return True
# need to add ownership in message
try :
local_file = new_dir + os.sep + new_file
s = os.lstat(local_file)
username = pwd.getpwuid(s.st_uid).pw_name
group = grp.getgrgid(s.st_gid).gr_name
ug = "%s:%s" % (username,group)
# check for mapping switch
if ug in self.mapping :
logger.debug("ROOT_CHOWN mapping from %s to %s" % (ug,self.mapping[ug]))
ug = self.mapping[ug]
msg.headers['ownership'] = ug
logger.debug("ROOT_CHOWN set ownership in headers %s" % msg.headers['ownership'])
except: logger.error("ROOT_CHOWN could not set ownership %s" % local_file)
return True
def on_file(self,parent):
import grp,os,pwd
logger = parent.logger
msg = parent.msg
logger.debug("ROOT_CHOWN on_file")
        # the message does not have the required info
if not 'ownership' in msg.headers :
logger.info("ROOT_CHOWN no
|
ownership in msg_headers")
return True
# it does, check for mapping
ug = msg.headers['ownership']
        if ug in self.mapping :
logger.debug("received ownership %s mapped to %s" % (ug,self.mapping[ug]))
ug = self.mapping[ug]
# try getting/setting ownership info to local_file
local_file = parent.new_dir + os.sep + parent.new_file
try :
parts = ug.split(':')
username = parts[0]
group = parts[1]
uid = pwd.getpwnam(username).pw_uid
            gid = grp.getgrnam(group).gr_gid
os.chown(local_file,uid,gid)
logger.info( "ROOT_CHOWN set ownership %s to %s" % (ug,local_file))
except: logger.error("ROOT_CHOWN could not set %s to %s" % (ug,local_file))
return True
self.plugin='ROOT_CHOWN'
|
tessercat/ddj
|
modules/poems.py
|
Python
|
mit
| 7,006
| 0.003283
|
import logging
from gluon import A
from gluon import DIV
from gluon import H3
from gluon import H4
from gluon import H5
from gluon import I
from gluon import IS_IN_SET
from gluon import LI
from gluon import P
from gluon import MARKMIN
from gluon import SQLFORM
from gluon import SPAN
from gluon import TAG
from gluon import UL
from gluon import URL
from gluon import XML
from gluon import xmlescape
date_format = '%B %Y'
index_class = 'col-xs-12 col-sm-6 col-md-4'
poem_class = 'col-xs-12 col-sm-10 col-md-8'
def _thumb(row, cls, title=None):
""" Return a column DIV thumbnail. """
caption = DIV(
H3(row.chapter.title),
H4('Chapter %i' % row.chapter.number),
H5(row.published.strftime(date_format)),
H3(row.intro_hanzi),
H4(row.intro_en),
_class='caption',
_role='button',
_title=title)
anchor = A(
caption,
_class='ddj-thumbnail',
_href=URL('poems', 'chapter', args=[row.chapter.number]))
thumbnail = DIV(anchor, _class='thumbnail')
return DIV(thumbnail, _class=cls)
def chapter(poem, db, uhdb):
""" Return a bootstrap row for a poem row. """
if not poem:
raise Exception('No such poem')
qry = ((db.verse.book==1) & (db.verse.chapter==poem.chapter))
verse = db(qry).select().first()
title = H3(poem.chapter.title)
subtitle = H4('Chapter %i' % poem.chapter.number)
published = H5(poem.published.strftime(date_format))
stanzas = verse.en.split('\r\n\r\n')
content = []
for stanza in stanzas:
content.append(P(XML(stanza.replace('\r\n', '<br />'))))
link = P(
A(
I('Go to the study version'),
_href=URL('studies', 'chapter', args=[poem.chapter.number]),
_style='color:inherit;',
_title='Study version'),
_style='font-size:0.9em;padding-top:1em')
content.append(P(link))
column = DIV(title, subtitle, published, *content, _class=poem_class)
return DIV(
column, _class='row',
_style='font-size:1.12em;white-space:nowrap;')
def chapter_range(page_number):
if page_number >= 1 and page_number <= 9:
low = ((page_number-1)*9)+1
high = page_number*9
else:
raise Exception('No such page')
return low, high
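# Worked example (illustrative, assuming the 81 chapters are split into 9 pages of 9):
#   chapter_range(1) -> (1, 9)
#   chapter_range(9) -> (73, 81)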
def decache(chapter, db):
""" Clear study chapter cache data. """
import studies
from gluon import current
# Decache the associated study.
studies.decache(chapter, db)
# Decache the poem itself.
current.cache.ram('poem-%d' % chapter, None)
# Decache links in the next poem.
qry = db.poem.chapter > int(chapter)
nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
if nxt:
current.cache.ram('links-%d' % nxt.first().chapter, None)
# Decache links in the previous poem.
qry = db.poem.chapter < chapter
    prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
if prev:
current.cache.ram('links-%d' % prev.first().chapter, None)
# Decache the page containing the poem.
page = (chapter + 8) / 9
current.cache.ram('poems-%d' % page, None)
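# For example (illustrative, not from the original module): decache(10, db) drops
# 'poem-10', the 'links-*' entries of the neighbouring chapters, and 'poems-2',
# since (10 + 8) / 9 == 2 assuming the Python 2 integer division web2py code like
# this typically runs under.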
def grid(db, deletable=False):
""" Return an SQLFORM.grid to manage poems. """
createargs = editargs = viewargs = {
'fields': [
'chapter', 'published', 'intro_hanzi', 'intro_en']}
fields = [
db.poem.chapter,
db.poem.published,
db.poem.intro_hanzi,
db.poem.intro_en]
maxtextlengths = {'poem.published': 50}
onupdate = lambda form: decache(int(form.vars.chapter), db)
db.poem.published.represent = lambda value, row: value.strftime(date_format)
db.poem.chapter.requires = IS_IN_SET(range(1, 82), zero=None)
grid = SQLFORM.grid(
db.poem,
createargs=createargs,
csv=False,
deletable=deletable,
details=False,
editargs=editargs,
fields=fields,
maxtextlengths=maxtextlengths,
oncreate=onupdate,
onupdate=onupdate,
orderby=db.poem.chapter,
paginate=None,
searchable=False,
viewargs=viewargs)
return grid
def index(page_number, db):
""" Return a row DIV of a page of poems. """
low, high = chapter_range(page_number)
qry = ((db.poem.chapter>=low) & (db.poem.chapter<=high))
thumbs = []
for row in db(qry).select(orderby=db.poem.chapter):
thumbs.append(_thumb(row, index_class))
return DIV(thumbs, _class='row display-flex')
def links(poem, db):
""" Return a row DIV of prev/next poems. """
thumbs = []
# Next.
qry = db.poem.chapter > poem.chapter
nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
if not nxt:
qry = db.poem.chapter >= 1
nxt = db(qry).select(limitby=(0,1), orderby=db.poem.chapter)
if nxt:
thumbs.append(_thumb(nxt.first(), poem_class, 'Next'))
# Previous.
qry = db.poem.chapter < poem.chapter
prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
if not prev:
qry = db.poem.chapter <= 81
prev = db(qry).select(limitby=(0,1), orderby=~db.poem.chapter)
if prev:
thumbs.append(_thumb(prev.first(), poem_class, 'Previous'))
# Bootstrap.
return DIV(
thumbs,
_class='row',
_style='padding-top: 2.5em;')
def pager(db):
""" Return a row DIV for a pager. """
from gluon import current
# Previous/current/next page.
if current.request.args(0):
current_page = int(current.request.args(0))
else:
current_page = 1
prev_page = current_page - 1
next_page = current_page + 1
# List of LI.
pages = []
# Previous/left.
li_class = ''
href = URL('poems', 'page', args=[str(prev_page)])
if prev_page < 1:
li_class = 'disabled'
href = '#'
elif prev_page == 1:
href = URL('poems', 'index')
span = SPAN(xmlescape(u'\u4e0a'), **{'_aria-hidden': 'true'})
anchor = A(span, _href=href, **{'_aria-label': 'Previous'})
pages.append(LI(anchor, _class=li_class, _title='Previous Page'))
# Chapter range links.
for page in range(1, 10):
li_class = ''
href = URL('poems', 'page', args=[str(page)])
page_range = ['%d-%d' % (((page-1)*9)+1, page*9)]
if page == 1:
href = URL('poems', 'index')
if page == current_page:
li_class = 'active'
page_range.append(SPAN('(current)', _class='sr-only'))
anchor = A(page_range, _href=href)
pages.append(LI(anchor, _class=li_class))
# Next/right.
li_class = ''
href = URL('poems', 'page', args=[str(next_page)])
if next_page > 9:
li_class = 'disabled'
href = '#'
span = SPAN(xmlescape(u'\u4e0b'), **{'_aria-hidden': 'true'})
anchor = A(span, _href=href, **{'_aria-label': 'Next'})
pages.append(LI(anchor, _class=li_class, _title='Next Page'))
# Together.
return UL(pages, _class='pagination')
|
volker-kempert/python-tools
|
src/find_duplicates/cli_find_dups.py
|
Python
|
mit
| 2,168
| 0.001384
|
#!/usr/bin/env python
# -*- UTF8 -*-
import sys
import argparse
from .biz_func import *
from .io import format_duplicates
from utils.verbose import Verboser
try:
from _version import __version__
except ImportError:
__version__ = '--development-instance--'
def find_duplicates(root_dir):
"""
find_duplicates identifies duplicate files below a directory
:param root_dir (string): path describing the directory where duplicate
files shall be searched for
:returns (list): containing lists of strings with file names (full path)
"""
return process_candidate_files(root_dir)
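# Illustrative shape of find_duplicates() output (hypothetical paths, not from the project):
#   [['/data/a.txt', '/data/copy_of_a.txt'], ['/tmp/x.log', '/tmp/x_old.log']]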
def parse_args(args=sys.argv):
""" find duplicates main function"""
parser = argparse.ArgumentParser(prog='find_duplicates', description="""
Find duplicates in file system
Scan a directory for duplicate files by checking name, size and md5.
The output is written to stdout.
- Each filename (full path) is written in one line
- Set of identical file names is separated by a line
      containing '--'
""")
parser.add_argument('scandir', action='store', default='.',
help='Name of the directory to scan')
parser.add_argument('--version',
help='Print the package version to stdout',
action='version', version='%(prog)s ' + __version__)
parser.add_argument('-v', '--verbose', action='count', default=0,
help='print verbosity information (can be multiple given)')
parser.add_argument('-o', '--outfile',
type=argparse.FileType('w'), default=sys.stdout,
help='Write output to file instead of stdout')
return parser.parse_args(args)
def main():
""" find duplicates main function"""
args = parse_args(sys.argv[1:])
Verboser().set_level(args.verbose)
Verboser().verbose_min("Scandir {0}".format(args.scandir))
duplicates = find_duplicates(args.scandir)
sort_members(duplicates)
duplicates = make_unique(duplicates)
format_duplicates(duplicates, args.outfile)
args.outfile.close()
if __name__ == "__main__":
main()
|
zunaid321/Lenovo_A820_kernel_kk
|
bionic/libc/kernel/tools/update_all.py
|
Python
|
gpl-2.0
| 2,397
| 0.01627
|
#!/usr/bin/env python
#
import sys, cpp, kernel, glob, os, re, getopt, clean_header
from defaults import *
from utils import *
def usage():
print """\
usage: %(progname)s [kernel-original-path]
    this program is used to update all the auto-generated clean headers
used by the Bionic C library. it assumes the following:
- a set of source kernel headers is located in '../original',
relative to the program's directory
- the clean headers will be placed in '../arch-<arch>/asm',
'../common/linux', '../common/asm-generic', etc..
""" % { "progname" : os.path.basename(sys.argv[0]) }
sys.exit(0)
try:
optlist, args = getopt.getopt( sys.argv[1:], '' )
except:
# unrecognized option
sys.stderr.write( "error: unrecognized option\n" )
usage()
if len(optlist) > 0 or len(args) > 1:
usage()
progdir = find_program_dir()
if len(args) == 1:
original_dir = args[0]
if not os.path.isdir(original_dir):
panic( "Not a directory: %s\n" % original_dir )
else:
original_dir = kernel_original_path
if not os.path.isdir(original_dir):
panic( "Missing directory, please specify one through command-line: %s\n" % original_dir )
# find all source files in 'original'
#
sources = []
for root, dirs, files in os.walk( original_dir ):
for file in files:
base, ext = os.path.splitext(file)
if ext == ".h":
sources.append( "%s/%s" % (root,file) )
b = BatchFileUpdater()
for arch in kernel_archs:
b.readDir( os.path.normpath( progdir + "/../arch-%s" % arch ) )
b.readDir( os.path.normpath( progdir + "/../common" ) )
#print "OLD " + repr(b.old_files)
oldlen = 120
for path in sources:
dst_path, newdata = clean_header.cleanupFile(path, original_dir)
if not dst_path:
continue
b.readFile( dst_path )
r = b.editFile( dst_path, newdata )
if r == 0:
state = "unchanged"
elif r == 1:
state = "edited"
else:
state = "added"
str = "cleaning: %-*s -> %-*s (%s)" % ( 35, "<original>" + path[len(original_dir):], 35, dst_path, state )
if sys.stdout.isatty():
print "%-*s" % (oldlen,str),
if (r == 0):
print "\r",
else:
print "\n",
oldlen = 0
else:
print str
oldlen = len(str)
print "%-*s" % (oldlen,"Done!")
b.updateGitFiles()
sys.exit(0)
|
mdneuzerling/AtomicAlgebra
|
AtomicAlgebra.py
|
Python
|
gpl-3.0
| 23,406
| 0.007007
|
from functools import reduce
from itertools import chain, combinations, product, permutations
# This class is used to represent and examine algebras on atom tables.
# It is intended to be used for nonassociative algebras, but this is not assumed.
class AtomicAlgebra:
# Create an algebra from a table of atoms, which gives compositions, and a converse structure.
    # An atom table is a list of lists, with each entry a set of atoms.
# The set of atoms is interpreted as a union. Atoms are 'a', 'b', 'c', etc.
# The converse pair is a list of 2-tuples of atoms.
# If 'a' is converse to 'b', write as ('a','b').
# If 'a' is symmetric, write as ('a', 'a').
# Can also give converses as a dictionary.
# Algebra may not necessarily meet all the axioms.
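    # Illustrative sketch (not part of the original source): a two-atom algebra with
    # an identity atom 'a' and a symmetric diversity atom 'b' could be written as
    #   atom_table = [[{'a'}, {'b'}], [{'b'}, {'a', 'b'}]]
    #   alg = AtomicAlgebra(atom_table, converse=[('a', 'a'), ('b', 'b')])
    # where each inner set is read as the union of the atoms it contains.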
def __init__(self, atom_table, converse = None):
if type(atom_table) == str:
atom_table = self._string_to_atom_table(atom_table)
self.n_atoms = len(atom_table[0])
self.atoms = [set([chr(i + 97)]) for i in range(self.n_atoms)]
self.atom_table = atom_table
# If no converses given assume all atoms are symmetric.
        if converse is None:
            # Keep a concrete value so the pair/dict conversion below also works.
            self.converse = converse = [(x,x) for x in [chr(i + 97) for i in range(self.n_atoms)]]
# Can give atoms as a dictionary on atoms...
if type(converse) is dict:
self.converse_pairs = self.converse_dict_to_pairs(converse)
self.converse_dict = converse
# ... or as a list of tuples.
else:
self.converse_pairs = converse
self.converse_dict = self.converse_pairs_to_dict(converse)
# set up the basic properties of the algebra.
self._non_identity_atoms = None
self.top = reduce(lambda x, y : x | y, self.atoms)
self.zero = set()
# The elements are the power set of the atoms.
self.elements = [combinations(list(self.top), n) for n in range(self.n_atoms + 1)]
self.elements = list(chain.from_iterable(self.elements))
self.elements = [set(element) for element in self.elements]
self.n_elements = 2**self.n_atoms
self.n_non_zero_elements = self.n_elements - 1
self.symmetric_atoms = [x[0] for x in self.converse_pairs if x[0] == x[1]]
self.non_symmetric_pairs = [x for x in self.converse_pairs if x[0] != x[1]]
self._cyclePartition = self.cycle_partition(self.converse_dict, self.n_atoms)
self._identity = None
self._semigroup = None
# properties
self._is_NA = None
self._satisfies_WA_axiom = None
self._is_WA = None
self._satisfies_SA_axiom = None
self._is_SA = None
self._is_associative = None
self._is_RA = None
# A human-readable description of each relation algebra axiom.
AXIOMS = {
"R01": "+-commutativity: x + y = y + x",
"R02": "+-associativity: x + (y + z) = (x + y) + z",
"R03": "Huntington's axiom: -(-x + -y) + -(-x + y) = x",
"R04": ";-associativity: x;(y;z) = (x;y);z",
"R05": ";-distributivity: (x + y);z = x;z + y;z",
"R06": "identity law: x;1' = x",
"R07": "converse-involution: con(con(x)) = x",
"R08": "converse-distributivity: con(x + y) = con(x) + con(y)",
"R09": "converse-involutive distributivity: con(x;y) = con(y);con(x)",
"R10": "Tarski/De Morgan axiom: con(x); -(x;y) + -y = y",
"WA" : "((id . x) . top) . top = (id . x) . (top . top)",
"SA" : "(x . top) . top = x . (top . top)"
}
# Given an atom table as a string, convert it to a matrix (list of lists).
# This method seems to be powered by magic, and should be redone.
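    # As a rough illustration (inferred from the parsing steps below, not an original
    # example), the string "[[a,b],[b,a+b]]" should come out as
    # [[{'a'}, {'b'}], [{'b'}, {'a', 'b'}]], with '+' read as union and '0' dropped
    # as the empty set.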
@staticmethod
def _string_to_atom_table(matrix_string):
M0 = matrix_string.replace(" ", "")
M1 = M0.strip()[1:-1]
M2 = M1.strip()[1:-1]
M3 = [line.split(',') for line in M2.split('],[')]
M4 = [[set(entry.split("+"))-set(['0']) for entry in line] for line in M3]
return M4
# Converses can be given as a list of tuples [('a', 'a'), ('b', 'c')] or a
    # dictionary on atoms {'a': 'a', 'b': 'c', 'c': 'b'}. The following
    # methods convert between the two.
@staticmethod
def converse_pairs_to_dict(converse_pairs):
converse_dict = dict()
for converse_pair in converse_pairs:
            if converse_pair[0] == converse_pair[1]: # symmetric atom
converse_dict[converse_pair[0]] = converse_pair[0]
else: # non-symmetric atoms
converse_dict[converse_pair[0]] = converse_pair[1]
converse_dict[converse_pair[1]] = converse_pair[0]
return converse_dict
@staticmethod
def converse_dict_to_pairs(converse_dict):
converse_pairs = []
for pair in converse_dict.items():
if pair not in converse_pairs and pair[::-1] not in converse_pairs:
converse_pairs.append(pair)
return converse_pairs
# Given a triple and a converse structure, generate the cycle including that triple.
# This is an implementation of the relation algebra concept of a Peircean transform.
# Cycle generated by (x,y,z) is:
# [ (x,y,z), (con(x),z,y), (y,con(z),con(x)),
# (con(y),con(x),con(z)),(con(z),x,con(y)), (z,con(y),x) ]
# A triple in a cycle is consistent if and only if all triples in the cycle are consistent.
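    # For instance (illustrative): with every atom symmetric, cycle(('a', 'b', 'c'), converse)
    # returns the six permutations of ('a', 'b', 'c'), with duplicates removed.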
@staticmethod
def cycle(triple, converse_dict):
if type(converse_dict) is not dict:
converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
x, y, z = triple
cycle = []
cycle.append(triple)
cycle.append((converse_dict[x], z, y))
cycle.append((y, converse_dict[z], converse_dict[x]))
cycle.append((converse_dict[y], converse_dict[x], converse_dict[z]))
cycle.append((converse_dict[z], x, converse_dict[y]))
cycle.append((z, converse_dict[y], x))
cycle.sort() # Prevents duplicates when using cycle_partition
return list(set(cycle)) # Remove duplicates.
# Given a converse structure, partition the triples of elements into cycles.
@staticmethod
def cycle_partition(converse_dict, n_atoms):
if type(converse_dict) is not dict:
converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
atoms = [chr(i + 97) for i in range(n_atoms)]
parts = []
for triple in product(atoms, repeat = 3):
cycle = AtomicAlgebra.cycle(triple, converse_dict)
if cycle not in parts: parts.append(cycle)
return parts
# Give a human readable report on a list of failed axioms, eg. ["R01", "R02", "R07"].
@staticmethod
def report_failed_axioms(failed_axioms):
if type(failed_axioms) is not list: failed_axioms = [failed_axioms]
for axiom in failed_axioms:
print("Fails axiom " + axiom + ": " + AtomicAlgebra.AXIOMS[axiom] + ".")
# Through unions, we can extend any map between atoms to a map between
# elements of algebras. For example, if 'a' -> 'b' and 'c' -> 'd', then
# {'a', 'b'} -> {'c', 'd'}. Thus, every map between atoms uniquely defines
# a map between elements. In practice we always define maps on atoms only.
# We use the term "function" in reference to a map between elements.
@staticmethod
def atom_function(atom_map, element):
if type(element) is str:
return atom_map[element]
else:
return set([AtomicAlgebra.atom_function(atom_map, x) for x in element])
# Turns a single atom 'a' into a set(['a']).
@staticmethod
def make_set(x):
if type(x) == str:
x = set([x])
if type(x) != type(set()):
raise TypeError('An element of the algebra needs to be either a set of atoms or a string representing a single atom.')
return x
# Check if a map between a
|
SUSE/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/compute/v2016_04_30_preview/models/virtual_machine_scale_set_extension_profile.py
|
Python
|
mit
| 1,089
| 0.000918
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualMachineScaleSetExtensionProfile(Model):
"""Describes a virtual machine scale set extension profile.
:param extensions: The virtual machine scale set child extension
resources.
:type extensions: list of :class:`VirtualMachineScaleSetExtension
<azure.mgmt.compute.compute.v2016_04_30_preview.models.VirtualMachineScaleSetExtension>`
"""
_attribute_map = {
'extensions': {'key': 'extensions', 'type': '[VirtualMachineScaleSetExtension]'},
}
def __init__(self, extensions=None):
self.extensions = extensions
|
GoogleCloudPlatform/PerfKitBenchmarker
|
tests/scripts/gcp_pubsub_client_test.py
|
Python
|
apache-2.0
| 1,810
| 0.003315
|
"""Tests for data/messaging_service/gcp_pubsub_client.py."""
import unittest
from unittest import mock
from perfkitbenchmarker.scripts.messaging_service_scripts.gcp import gcp_pubsub_client
NUMBER_OF_MESSAGES = 1
MESSAGE_SIZE = 10
PROJECT = 'pkb_test_project'
TOPIC = 'pkb_test_topic'
SUBSCRIPTION = 'pkb_test_subscription'
@mock.patch('google.cloud.pubsub_v1.PublisherClient')
@mock.patch('google.cloud.pubsub_v1.SubscriberClient')
class GCPPubSubClientTest(unittest.TestCase):
def testPublishMessage(self, _, publisher_mock):
message = 'test_message'.encode('utf-8')
topic_path = publisher_mock.return_value.topic_path.return_value = 'test_topic_path'
gcp_interface = gcp_pubsub_client.GCPPubSubClient(PROJECT, TOPIC,
SUBSCRIPTION)
gcp_interface.publish_message(message)
# assert publish was called
publisher_mock.return_value.publish.assert_called_with(topic_path, message)
def testPullMessage(self, subscriber_mock, _):
gcp_interface = gcp_pubsub_client.GCPPubSubClient(PROJECT, TOPIC,
SUBSCRIPTION)
    gcp_interface.pull_message()
# assert pull was called
subscriber_mock.return_value.pull.assert_called()
def testAcknowledgeReceivedMessage(self, subscriber_mock, _):
response_mock = mock.MagicMock()
response_mock.return_value.received_messages[
0].message.data = 'mocked_message'
gcp_interface = gcp_pubsub_client.GCPPubSubClient(PROJECT, TOPIC,
SUBSCRIPTION)
gcp_interface.acknowledge_received_message(response_mock)
# assert acknowledge was called
subscriber_mock.return_value.acknowledge.assert_called()
if __name__ == '__main__':
unittest.main()
|
CKPalk/MachineLearning
|
Assignment1/id3.py
|
Python
|
mit
| 4,116
| 0.082119
|
import sys
import math
import CSVReader
import DecisionTree
# GLOBALS
attributes = list()
data = list(list())
pre_prune_tree = True
# MATH FUNCTIONS
def Entropy( yesNo ):
yes = yesNo[0]; no = yesNo[1]
if no == 0 or yes == 0: return 0
total = no + yes
return ( -( yes / total ) * math.log( yes / total, 2 )
- ( no / total ) * math.log( no / total, 2 ) )
def Gain( S, Attr ):
entropy_S = Entropy( resultsOfSet( S ) )
entropy_sum = 0
for label in AttributeLabels( S, Attr ):
subset_S = setWithLabel( S, Attr, label )
entropy_sum += ( ( len( subset_S ) / len( S ) ) * Entropy( resultsOfSet( subset_S ) ) )
return entropy_S - entropy_sum
# HELPER
def indexOfAttribute( Attr ):
return attributes.index( Attr )
def AttributeLabels( S, Attr ):
index = indexOfAttribute( Attr )
return list( set( [ row[ index ] for row in S ] ) )
def setWithLabel( S, Attr, Label ):
    return list( filter( lambda row: row[ indexOfAttribute( Attr ) ] == Label, S ) )
def resultsOfSet( S ):
no = len( list( filter( lambda row: row[-1] is False, S ) ) )
return ( len( S ) - no, no )
def convertRowToDict( row ):
return { attributes[ i ] : row[ i ] for i in range( len( row ) ) }
def extractDecisions( S ):
return [ row[-1] for row in S ]
def compareDecisions( D1, D2 ):
    return sum( [ 1 if D1[i] is D2[i] else 0 for i in range( min( len( D1 ), len( D2 ) ) ) ] ) / min( len( D1 ), len( D2 ) )
def findBestAttribute( S, attrs ):
bestAttributeAndGain = ( None, -1 ) if not pre_prune_tree else ( None, 0 )
#print( "+-- Gain ---" )
for attr in attrs:
attrGain = Gain( S, attr )
#print( "|", attr, "%0.7f" % ( attrGain ) )
if attrGain > bestAttributeAndGain[ 1 ]:
bestAttributeAndGain = ( attr, attrGain )
#print( "+-------------" )
#print( " > Best attribute:", bestAttributeAndGain[0], "\n" )
return bestAttributeAndGain[ 0 ]
# Prediction is by higher percentage
def getPrediction( S ):
res = resultsOfSet( S )
return True if res[ 0 ] > res[ 1 ] else False
def createNextNodes( parent ):
if len( parent.attributes ) == 0: # No remaining attributes
return
trueParentDataSubset = setWithLabel( parent.dataSet, parent.attribute, True )
trueBestAttribute = findBestAttribute( trueParentDataSubset, parent.attributes )
if trueBestAttribute is not None:
parent.newTruePath( trueBestAttribute, trueParentDataSubset )
createNextNodes( parent.truePath )
falseParentDataSubset = setWithLabel( parent.dataSet, parent.attribute, False )
falseBestAttribute = findBestAttribute( falseParentDataSubset, parent.attributes )
if falseBestAttribute is not None:
parent.newFalsePath( falseBestAttribute, falseParentDataSubset )
createNextNodes( parent.falsePath )
# ID3
def createDecisionTree( attrs, rows ):
tree = DecisionTree.DecisionTree( attrs )
rootAttributes = attrs[:-1]
bestAttribute = findBestAttribute( rows, rootAttributes )
outcomes = [ row[-1] for row in rows ]
allSame = True
for outcome in outcomes:
if outcome != outcomes[0]: allSame = False; continue
if allSame:
tree.newRoot( None, rootAttributes, rows )
return tree
tree.newRoot( bestAttribute, rootAttributes, rows )
createNextNodes( tree.root ) # Recursively builds tree
return tree
# MAIN
def main( argv ):
if len(argv) != 3:
return print( "ERROR: Usage \"python3 id3.py <training-set> <test-set> <model-file>\"" )
training_tup = CSVReader.readBooleanCSV( argv[ 0 ] )
global attributes; attributes = training_tup[ 0 ]
global data ; data = training_tup[ 1 ]
testing_tup = CSVReader.readBooleanCSV( argv[ 1 ] )
test_attributes = testing_tup[ 0 ]
test_data = testing_tup[ 1 ]
test_decisions = extractDecisions( test_data )
print( "Attributes" )
print( ', '.join( attributes ), "\n" )
tree = createDecisionTree( attributes, data )
predictions = [ getPrediction( tree.dataSetFromDecisions( convertRowToDict( row ) ) ) for row in test_data ]
print( "\nPrediction accuracy vs. testing data:", "{}%\n\n".format( 100 * compareDecisions( predictions, test_decisions ) ) )
tree.printTree( argv[2] )
if __name__=='__main__':
main( sys.argv[1:] )
|
jardiacaj/finem_imperii
|
world/migrations/0004_tileevent_create_timestamp.py
|
Python
|
agpl-3.0
| 501
| 0.001996
|
# Generated by Django 2.1 on 2018-08-19 13:12
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('world', '0003_auto_20180819_0036'),
]
operations = [
migrations.AddField(
model_name='tileevent',
name='create_timestamp',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
mxOBS/deb-pkg_trusty_chromium-browser
|
third_party/WebKit/Source/bindings/scripts/v8_union.py
|
Python
|
bsd-3-clause
| 6,305
| 0.000952
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import v8_utilities
UNION_H_INCLUDES = frozenset([
'bindings/core/v8/Dictionary.h',
'bindings/core/v8/ExceptionState.h',
'bindings/core/v8/V8Binding.h',
'platform/heap/Handle.h',
])
UNION_CPP_INCLUDES_BLACKLIST = frozenset([
# This header defines static functions needed to implement event handler
# attributes in interfaces that implement GlobalEventHandlers. They are not
# needed or used by UnionTypes*.cpp, so including the header causes
# compilation errors.
# FIXME: We should solve this problem in a way that doesn't involve special-
# casing a header like this.
'core/dom/GlobalEventHandlers.h',
])
cpp_includes = set()
header_forward_decls = set()
def union_context(union_types, interfaces_info):
cpp_includes.clear()
header_forward_decls.clear()
# For container classes we strip nullable wrappers. For example,
# both (A or B)? and (A? or B) will become AOrB. This should be OK
# because container classes can handle null and it seems that
# distinguishing (A or B)? and (A? or B) doesn't make sense.
container_cpp_types = set()
union_types_for_containers = set()
nullable_cpp_types = set()
for union_type in union_types:
cpp_type = union_type.cpp_type
if cpp_type not in container_cpp_types:
union_types_for_containers.add(union_type)
container_cpp_types.add(cpp_type)
if union_type.includes_nullable_type:
nullable_cpp_types.add(cpp_type)
union_types_for_containers = sorted(union_types_for_containers,
key=lambda union_type: union_type.cpp_type)
nullable_cpp_types = sorted(nullable_cpp_types)
return {
'containers': [container_context(union_type, interfaces_info)
for union_type in union_types_for_containers],
'cpp_includes': sorted(cpp_includes - UNION_CPP_INCLUDES_BLACKLIST),
'header_forward_decls': sorted(header_forward_decls),
'header_includes': sorted(UNION_H_INCLUDES),
'nullable_cpp_types': nullable_cpp_types,
}
def container_context(union_type, interfaces_info):
members = []
# These variables refer to member contexts if the given union type has
# corresponding types. They are used for V8 -> impl conversion.
array_buffer_type = None
array_buffer_view_type = None
array_or_sequence_type = None
boolean_type = None
dictionary_type = None
interface_types = []
numeric_type = None
string_type = None
for member in union_type.member_types:
context = member_context(member, interfaces_info)
members.append(context)
if member.base_type == 'ArrayBuffer':
if array_buffer_type:
raise Exception('%s is ambiguous.' % union_type.name)
array_buffer_type = context
elif member.base_type == 'ArrayBufferView':
if array_buffer_view_type:
raise Exception('%s is ambiguous.' % union_type.name)
array_buffer_view_type = context
# FIXME: Remove generic Dictionary special casing.
elif member.is_dictionary or member.base_type == 'Dictionary':
if dictionary_type:
raise Exception('%s is ambiguous.' % union_type.name)
dictionary_type = context
elif member.is_array_or_sequence_type:
if array_or_sequence_type:
raise Exception('%s is ambiguous.' % union_type.name)
array_or_sequence_type = context
elif member.is_interface_type:
interface_types.append(context)
elif member is union_type.boolean_member_type:
boolean_type = context
elif member is union_type.numeric_member_type:
numeric_type = context
elif member is union_type.string_member_type:
string_type = context
else:
raise Exception('%s is not supported as an union member.' % member.name)
# Nullable restriction checks
nullable_members = union_type.number_of_nullable_member_types
    if nullable_members > 1:
raise Exception('%s contains more than one nullable members' % union_type.name)
if dictionary_type and nullable_members == 1:
raise Exception('%s has a dictionary and a nullable member' % union_type.name)
return {
'array_buffer_type': array_buffer_type,
'array_buffer_view_type': array_buffer_view_type,
        'array_or_sequence_type': array_or_sequence_type,
'boolean_type': boolean_type,
'cpp_class': union_type.cpp_type,
'dictionary_type': dictionary_type,
'type_string': str(union_type),
'includes_nullable_type': union_type.includes_nullable_type,
'interface_types': interface_types,
'members': members,
'needs_trace': any(member['is_traceable'] for member in members),
'numeric_type': numeric_type,
'string_type': string_type,
}
def member_context(member, interfaces_info):
cpp_includes.update(member.includes_for_type)
interface_info = interfaces_info.get(member.name, None)
if interface_info:
cpp_includes.update(interface_info.get('dependencies_include_paths', []))
header_forward_decls.add(member.implemented_as)
if member.is_nullable:
member = member.inner_type
return {
'cpp_name': v8_utilities.uncapitalize(member.name),
'cpp_type': member.cpp_type_args(used_in_cpp_sequence=True),
'cpp_local_type': member.cpp_type,
'cpp_value_to_v8_value': member.cpp_value_to_v8_value(
cpp_value='impl.getAs%s()' % member.name, isolate='isolate',
creation_context='creationContext'),
'is_traceable': member.is_traceable,
'rvalue_cpp_type': member.cpp_type_args(used_as_rvalue_type=True),
'specific_type_enum': 'SpecificType' + member.name,
'type_name': member.name,
'v8_value_to_local_cpp_value': member.v8_value_to_local_cpp_value(
{}, 'v8Value', 'cppValue', isolate='isolate',
needs_exception_state_for_string=True),
}
|
yasir1brahim/OLiMS
|
lims/browser/reports/qualitycontrol_analysesrepeated.py
|
Python
|
agpl-3.0
| 5,554
| 0.00162
|
from dependencies.dependency import getToolByName
from lims.browser import BrowserView
from dependencies.dependency import ViewPageTemplateFile
from lims import bikaMessageFactory as _
from lims.utils import t
from lims.utils import formatDateQuery, formatDateParms
from dependencies.dependency import IViewView
from dependencies.dependency import implements
class Report(BrowserView):
implements(IViewView)
template = ViewPageTemplateFile("templates/report_out.pt")
def __init__(self, context, request, report=None):
self.report = report
BrowserView.__init__(self, context, request)
def __call__(self):
bac = getToolByName(self.context, 'bika_analysis_catalog')
self.report_content = {}
parm_lines = {}
parms = []
headings = {}
headings['header'] = _("Analyses retested")
headings['subheader'] = _("Analyses which have been retested")
count_all = 0
query = {'portal_type': 'Analysis',
'getRetested': True,
'sort_order': 'reverse'}
date_query = formatDateQuery(self.context, 'Received')
if date_query:
query['getDateReceived'] = date_query
received = formatDateParms(self.context, 'Received')
else:
received = 'Undefined'
parms.append(
{'title': _('Received'),
'value': received,
'type': 'text'})
wf_tool = getToolByName(self.context, 'portal_workflow')
if self.request.form.has_key('bika_analysis_workflow'):
query['review_state'] = self.request.form['bika_analysis_workflow']
review_state = wf_tool.getTitleForStateOnType(
self.request.form['bika_analysis_workflow'], 'Analysis')
else:
review_state = 'Undefined'
parms.append(
{'title': _('Status'),
'value': review_state,
'type': 'text'})
if self.request.form.has_key('bika_cancellation_workflow'):
query['cancellation_state'] = self.request.form[
'bika_cancellation_workflow']
cancellation_state = wf_tool.getTitleForStateOnType(
self.request.form['bika_cancellation_workflow'], 'Analysis')
else:
cancellation_state = 'Undefined'
parms.append(
{'title': _('Active'),
'value': cancellation_state,
'type': 'text'})
if self.request.form.has_key('bika_worksheetanalysis_workflow'):
query['worksheetanalysis_review_state'] = self.request.form[
'bika_worksheetanalysis_workflow']
ws_review_state = wf_tool.getTitleForStateOnType(
self.request.form['bika_worksheetanalysis_workflow'], 'Analysis')
else:
ws_review_state = 'Undefined'
parms.append(
{'title': _('Assigned to worksheet'),
'value': ws_review_state,
'type': 'text'})
# and now lets do the actual report lines
formats = {'columns': 8,
'col_heads': [_('Client'),
_('Request'),
_('Sample type'),
_('Sample point'),
_('Category'),
_('Analysis'),
_('Received'),
_('Status'),
],
'class': '',
}
datalines = []
clients = {}
sampletypes = {}
samplepoints = {}
categories = {}
services = {}
for a_proxy in bac(query):
analysis = a_proxy.getObject()
dataline = []
dataitem = {'value': analysis.getClientTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.getRequestID()}
dataline.append(dataitem)
dataitem = {'value': analysis.aq_parent.getSampleTypeTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.aq_parent.getSamplePointTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.getCategoryTitle()}
dataline.append(dataitem)
dataitem = {'value': analysis.getServiceTitle()}
dataline.append(dataitem)
dataitem = {'value': self.ulocalized_time(analysis.getDateReceived())}
dataline.append(dataitem)
state = wf_tool.getInfoFor(analysis, 'review_state', '')
review_state = wf_tool.getTitleForStateOnType(
state, 'Analysis')
            dataitem = {'value': review_state}
dataline.append(dataitem)
datalines.append(dataline)
count_all += 1
# table footer data
footlines = []
footline = []
footitem = {'value': _('Number of analyses retested for period'),
'colspan': 7,
'class': 'total_label'}
        footline.append(footitem)
footitem = {'value': count_all}
footline.append(footitem)
footlines.append(footline)
self.report_content = {
'headings': headings,
'parms': parms,
'formats': formats,
'datalines': datalines,
'footings': footlines}
title = t(headings['header'])
return {'report_title': title,
'report_data': self.template()}
|
LogicalDash/kivy
|
kivy/uix/gridlayout.py
|
Python
|
mit
| 19,254
| 0
|
'''
Grid Layout
===========
.. only:: html
.. image:: images/gridlayout.gif
:align: right
.. only:: latex
.. image:: images/gridlayout.png
:align: right
.. versionadded:: 1.0.4
The :class:`GridLayout` arranges children in a matrix. It takes the available
space and divides it into columns and rows, then adds widgets to the resulting
"cells".
.. versionchanged:: 1.0.7
The implementation has changed to use the widget size_hint for calculating
column/row sizes. `uniform_width` and `uniform_height` have been removed
    and other properties have been added to give you more control.
Background
----------
Unlike many other toolkits, you cannot explicitly place a widget in a specific
column/row. Each child is automatically assigned a position determined by the
layout configuration and the child's index in the children list.
A GridLayout must always have at least one input constraint:
:attr:`GridLayout.cols` or :attr:`GridLayout.rows`. If you do not specify cols
or rows, the Layout will throw an exception.
Column Width and Row Height
---------------------------
The column width/row height are determined in 3 steps:
- The initial size is given by the :attr:`col_default_width` and
:attr:`row_default_height` properties. To customize the size of a single
column or row, use :attr:`cols_minimum` or :attr:`rows_minimum`.
- The `size_hint_x`/`size_hint_y` of the children are taken into account.
If no widgets have a size hint, the maximum size is used for all
children.
- You can force the default size by setting the :attr:`col_force_default`
or :attr:`row_force_default` property. This will force the layout to
ignore the `width` and `size_hint` properties of children and use the
default size.
Using a GridLayout
------------------
In the example below, all widgets will have an equal size. By default, the
`size_hint` is (1, 1), so a Widget will take the full size of the parent::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1'))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2'))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_1.jpg
Now, let's fix the size of Hello buttons to 100px instead of using
size_hint_x=1::
layout = GridLayout(cols=2)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_2.jpg
Next, let's fix the row height to a specific size::
layout = GridLayout(cols=2, row_force_default=True, row_default_height=40)
layout.add_widget(Button(text='Hello 1', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 1'))
layout.add_widget(Button(text='Hello 2', size_hint_x=None, width=100))
layout.add_widget(Button(text='World 2'))
.. image:: images/gridlayout_3.jpg
'''
__all__ = ('GridLayout', 'GridLayoutException')
from kivy.logger import Logger
from kivy.uix.layout import Layout
from kivy.properties import NumericProperty, BooleanProperty, DictProperty, \
BoundedNumericProperty, ReferenceListProperty, VariableListProperty, \
ObjectProperty, StringProperty
from math import ceil
def nmax(*args):
# merge into one list
args = [x for x in args if x is not None]
return max(args)
def nmin(*args):
# merge into one list
args = [x for x in args if x is not None]
return min(args)
class GridLayoutException(Exception):
'''Exception for errors if the grid layout manipulation fails.
'''
pass
class GridLayout(Layout):
'''Grid layout class. See module documentation for more information.
'''
spacing = VariableListProperty([0, 0], length=2)
'''Spacing between children: [spacing_horizontal, spacing_vertical].
spacing also accepts a one argument form [spacing].
:attr:`spacing` is a
:class:`~kivy.properties.VariableListProperty` and defaults to [0, 0].
'''
padding = VariableListProperty([0, 0, 0, 0])
    '''Padding between the layout box and its children: [padding_left,
padding_top, padding_right, padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced NumericProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0, 0, 0].
'''
cols = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of columns in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to BoundedNumericProperty. You can no
longer set this to a negative value.
:attr:`cols` is a :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
    rows = BoundedNumericProperty(None, min=0, allownone=True)
'''Number of rows in the grid.
.. versionchanged:: 1.0.8
Changed from a NumericProperty to a BoundedNumericProperty. You can no
longer set this to a negative value.
:attr:`rows` is a :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
col_default_width = NumericProperty(0)
'''Default minimum size to use for a column.
.. versionadded:: 1.0.7
:attr:`col_default_width` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
row_default_height = NumericProperty(0)
'''Default minimum size to use for row.
.. versionadded:: 1.0.7
:attr:`row_default_height` is a :class:`~kivy.properties.NumericProperty`
and defaults to 0.
'''
col_force_default = BooleanProperty(False)
'''If True, ignore the width and size_hint_x of the child and use the
default column width.
.. versionadded:: 1.0.7
:attr:`col_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
row_force_default = BooleanProperty(False)
'''If True, ignore the height and size_hint_y of the child and use the
default row height.
.. versionadded:: 1.0.7
:attr:`row_force_default` is a :class:`~kivy.properties.BooleanProperty`
and defaults to False.
'''
cols_minimum = DictProperty({})
'''Dict of minimum width for each column. The dictionary keys are the
column numbers, e.g. 0, 1, 2...
.. versionadded:: 1.0.7
:attr:`cols_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
rows_minimum = DictProperty({})
'''Dict of minimum height for each row. The dictionary keys are the
row numbers, e.g. 0, 1, 2...
.. versionadded:: 1.0.7
:attr:`rows_minimum` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
minimum_width = NumericProperty(0)
'''Automatically computed minimum width needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0. It is read only.
'''
minimum_height = NumericProperty(0)
'''Automatically computed minimum height needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0. It is read only.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Automatically computed minimum size needed to contain all children.
.. versionadded:: 1.0.8
:attr:`minimum_size` is a
:class:`~kivy.properties.ReferenceListProperty` of
(:attr:`minimum_width`, :attr:`minimum_height`) properties. It is read
only.
'''
def __init__(self, **kwargs):
self._cols = self._rows = None
super(GridLayout, self).__init__(**kwargs)
fbind = self.fbind
update = self._trigger_layout
fbind('col_default_width', update)
fbind('row_default_height', update)
fbind('col_force_default', update)
fbind('row_force_default', update)
|
zeit/now-cli
|
packages/now-cli/test/dev/fixtures/python-flask/api/user.py
|
Python
|
apache-2.0
| 247
| 0.004049
|
from flask import Flask, Response, request
app = Flask(__name__)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def user(path):
    name = request.args.get('name')
return Response("Hello %s" % (name), mimetype='text/html')
|
qris/mailer-dye
|
dye/fablib.py
|
Python
|
gpl-3.0
| 29,664
| 0.001584
|
import os
from os import path
from datetime import datetime
import getpass
import re
import time
from fabric.context_managers import cd, hide, settings
from fabric.operations import require, prompt, get, run, sudo, local
from fabric.state import env
from fabric.contrib import files
from fabric import utils
def _setup_paths(project_settings):
# first merge in variables from project_settings - but ignore __doc__ etc
user_settings = [x for x in vars(project_settings).keys() if not x.startswith('__')]
for setting in user_settings:
env[setting] = vars(project_settings)[setting]
# allow for project_settings having set up some of these differently
env.setdefault('verbose', False)
env.setdefault('use_sudo', True)
env.setdefault('cvs_rsh', 'CVS_RSH="ssh"')
env.setdefault('default_branch', {'production': 'master', 'staging': 'master'})
env.setdefault('server_project_home',
path.join(env.server_home, env.project_name))
# TODO: change dev -> current
env.setdefault('vcs_root_dir', path.join(env.server_project_home, 'dev'))
env.setdefault('prev_root', path.join(env.server_project_home, 'previous'))
env.setdefault('next_dir', path.join(env.server_project_home, 'next'))
env.setdefault('dump_dir', path.join(env.server_project_home, 'dbdumps'))
    env.setdefault('deploy_dir', path.join(env.vcs_root_dir, 'deploy'))
env.setdefault('settings', '%(project_name)s.settings' % env)
if env.project_type == "django":
env.setdefault('relative_django_dir', env.project_name)
env.setdefault('relative_django_settings_dir', env['relative_django_dir'])
        env.setdefault('relative_ve_dir', path.join(env['relative_django_dir'], '.ve'))
# now create the absolute paths of everything else
env.setdefault('django_dir',
path.join(env['vcs_root_dir'], env['relative_django_dir']))
env.setdefault('django_settings_dir',
path.join(env['vcs_root_dir'], env['relative_django_settings_dir']))
env.setdefault('ve_dir',
path.join(env['vcs_root_dir'], env['relative_ve_dir']))
env.setdefault('manage_py', path.join(env['django_dir'], 'manage.py'))
# local_tasks_bin is the local copy of tasks.py
# this should be the copy from where ever fab.py is being run from ...
if 'DEPLOYDIR' in os.environ:
env.setdefault('local_tasks_bin',
path.join(os.environ['DEPLOYDIR'], 'tasks.py'))
else:
env.setdefault('local_tasks_bin',
path.join(path.dirname(__file__), 'tasks.py'))
# valid environments - used for require statements in fablib
env.valid_envs = env.host_list.keys()
def _linux_type():
if 'linux_type' not in env:
        # work out if we're based on redhat or debian
# TODO: look up stackoverflow question about this.
if files.exists('/etc/redhat-release'):
env.linux_type = 'redhat'
elif files.exists('/etc/debian_version'):
env.linux_type = 'debian'
else:
# TODO: should we print a warning here?
utils.abort("could not determine linux type of server we're deploying to")
return env.linux_type
def _get_python():
if 'python_bin' not in env:
python26 = path.join('/', 'usr', 'bin', 'python2.6')
if files.exists(python26):
env.python_bin = python26
else:
env.python_bin = path.join('/', 'usr', 'bin', 'python')
return env.python_bin
def _get_tasks_bin():
if 'tasks_bin' not in env:
env.tasks_bin = path.join(env.deploy_dir, 'tasks.py')
return env.tasks_bin
def _tasks(tasks_args, verbose=False):
tasks_cmd = _get_tasks_bin()
if env.verbose or verbose:
tasks_cmd += ' -v'
sudo_or_run(tasks_cmd + ' ' + tasks_args)
def _get_svn_user_and_pass():
if 'svnuser' not in env or len(env.svnuser) == 0:
# prompt user for username
prompt('Enter SVN username:', 'svnuser')
if 'svnpass' not in env or len(env.svnpass) == 0:
# prompt user for password
env.svnpass = getpass.getpass('Enter SVN password:')
def verbose(verbose=True):
"""Set verbose output"""
env.verbose = verbose
def deploy_clean(revision=None):
""" delete the entire install and do a clean install """
if env.environment == 'production':
utils.abort('do not delete the production environment!!!')
require('server_project_home', provided_by=env.valid_envs)
# TODO: dump before cleaning database?
with settings(warn_only=True):
webserver_cmd('stop')
clean_db()
clean_files()
deploy(revision)
def clean_files():
sudo_or_run('rm -rf %s' % env.server_project_home)
def _create_dir_if_not_exists(path):
if not files.exists(path):
sudo_or_run('mkdir -p %s' % path)
def deploy(revision=None, keep=None):
""" update remote host environment (virtualenv, deploy, update)
It takes two arguments:
* revision is the VCS revision ID to checkout (if not specified then
the latest will be checked out)
* keep is the number of old versions to keep around for rollback (default
5)"""
require('server_project_home', provided_by=env.valid_envs)
check_for_local_changes()
_create_dir_if_not_exists(env.server_project_home)
# TODO: check if our live site is in <sitename>/dev/ - if so
# move it to <sitename>/current/ and make a link called dev/ to
# the current/ directory
# TODO: if dev/ is found to be a link, ask the user if the apache config
# has been updated to point at current/ - and if so then delete dev/
# _migrate_from_dev_to_current()
create_copy_for_next()
checkout_or_update(in_next=True, revision=revision)
# remove any old pyc files - essential if the .py file has been removed
if env.project_type == "django":
rm_pyc_files(path.join(env.next_dir, env.relative_django_dir))
# create the deploy virtualenv if we use it
create_deploy_virtualenv(in_next=True)
# we only have to disable this site after creating the rollback copy
# (do this so that apache carries on serving other sites on this server
# and the maintenance page for this vhost)
downtime_start = datetime.now()
link_webserver_conf(maintenance=True)
with settings(warn_only=True):
webserver_cmd('reload')
next_to_current_to_rollback()
# Use tasks.py deploy:env to actually do the deployment, including
# creating the virtualenv if it thinks it necessary, ignoring
# env.use_virtualenv as tasks.py knows nothing about it.
_tasks('deploy:' + env.environment)
# bring this vhost back in, reload the webserver and touch the WSGI
# handler (which reloads the wsgi app)
link_webserver_conf()
webserver_cmd('reload')
downtime_end = datetime.now()
touch_wsgi()
delete_old_rollback_versions(keep)
if env.environment == 'production':
setup_db_dumps()
_report_downtime(downtime_start, downtime_end)
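# Example invocation from a developer machine (an illustrative sketch; it
# assumes the project fabfile defines an environment task such as `staging`
# that sets env.environment and the host list before calling into this fablib):
#   fab staging deploy:revision=a1b2c3,keep=3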
def _report_downtime(downtime_start, downtime_end):
downtime = downtime_end - downtime_start
utils.puts("Downtime lasted for %.1f seconds" % downtime.total_seconds())
utils.puts("(Downtime started at %s and finished at %s)" %
(downtime_start, downtime_end))
def set_up_celery_daemon():
require('vcs_root_dir', 'project_name', provided_by=env)
for command in ('celerybeat', 'celeryd'):
command_project = command + '_' + env.project_name
celery_run_script_location = path.join(env['vcs_root_dir'],
'celery', 'init', command)
celery_run_script = path.join('/etc', 'init.d', command_project)
celery_configuration_location = path.join(env['vcs_root_dir'],
'celery', 'config', command)
celery_configuration_destination = path.join('/etc', 'default',
command_project)
sudo_or_run(" ".join(['cp', celery_run_script_location,
celery_run_script]))
sudo_
|
AlpacaDB/chainer
|
cupy/creation/from_data.py
|
Python
|
mit
| 3,203
| 0
|
import cupy
from cupy import core
def array(obj, dtype=None, copy=True, ndmin=0):
"""Creates an array on the current device.
This function currently does not support the ``order`` and ``subok``
options.
Args:
obj: :class:`cupy.ndarray` object or any other object that can be
passed to :func:`numpy.array`.
dtype: Data type specifier.
copy (bool): If ``False``, this function returns ``obj`` if possible.
Otherwise this function always returns a new array.
ndmin (int): Minimum number of dimensions. Ones are inserted to the
            head of the shape if needed.
Returns:
cupy.ndarray: An array on the current device.
.. seealso:: :func:`numpy.array`
"""
# TODO(beam2d): Support order and subok options
return core.array(obj, dtype, copy, ndmin)
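# Illustrative usage of this module (a hedged sketch, not part of the original
# file; it assumes a CUDA device is available and that numpy is importable):
#
#   import numpy
#   x_cpu = numpy.arange(6).reshape(2, 3)
#   x_gpu = cupy.array(x_cpu)               # copies the data to the current device
#   same = cupy.asarray(x_gpu)              # already on the device, so no copy
#   contig = cupy.ascontiguousarray(x_gpu)  # returns x_gpu if already C-contiguous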
def asarray(a, dtype=None):
    """Converts an object to array.
This is equivalent to ``array(a, dtype, copy=False)``.
This function currently does not support the ``order`` option.
Args:
a: The source object.
dtype: Data type specifier. It is inferred from the input by default.
Returns:
cupy.ndarray: An array on the current device. If ``a`` is already on
the device, no copy is performed.
.. seealso:: :func:`numpy.asarray`
"""
return cupy.array(a, dtype=dtype, copy=False)
def asanyarray(a, dtype=None):
"""Converts an object to array.
This is currently equivalent to :func:`~cupy.asarray`, since there is no
subclass of ndarray in CuPy. Note that the original
:func:`numpy.asanyarray` returns the input array as is if it is an instance
of a subtype of :class:`numpy.ndarray`.
.. seealso:: :func:`cupy.asarray`, :func:`numpy.asanyarray`
"""
return cupy.asarray(a, dtype)
def ascontiguousarray(a, dtype=None):
"""Returns a C-contiguous array.
Args:
a (cupy.ndarray): Source array.
dtype: Data type specifier.
Returns:
cupy.ndarray: If no copy is required, it returns ``a``. Otherwise, it
returns a copy of ``a``.
.. seealso:: :func:`numpy.ascontiguousarray`
"""
return core.ascontiguousarray(a, dtype)
# TODO(okuta): Implement asmatrix
def copy(a):
"""Creates a copy of a given array on the current device.
    This function allocates the new array on the current device. If the given
    array is allocated on a different device, then this function tries to
copy the contents over the devices.
Args:
a (cupy.ndarray): The source array.
Returns:
cupy.ndarray: The copy of ``a`` on the current device.
See: :func:`numpy.copy`, :meth:`cupy.ndarray.copy`
"""
# If the current device is different from the device of ``a``, then this
# function allocates a new array on the current device, and copies the
# contents over the devices.
# TODO(beam2d): Support ordering option
return a.copy()
# TODO(okuta): Implement frombuffer
# TODO(okuta): Implement fromfile
# TODO(okuta): Implement fromfunction
# TODO(okuta): Implement fromiter
# TODO(okuta): Implement fromstring
# TODO(okuta): Implement loadtxt
|
chanhou/refine-client-py
|
parse_paper.py
|
Python
|
gpl-3.0
| 289
| 0.020761
|
with open('/tmp2/MicrosoftAcademicGraph/Papers.txt', 'r') as f, open('/tmp2/MicrosoftAcademicGraph_refine/papers_1_column.txt','w') as b:
for line in f:
a = line.split('\t')
        #a = a[1].split('\r')
#b.write(a[0]+a[1])
b.write(a[2]+'\n')
#break
|
asifmadnan/DIGITS
|
digits/model/tasks/caffe_train.py
|
Python
|
bsd-3-clause
| 54,494
| 0.004
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import re
import time
import math
import subprocess
import numpy as np
from google.protobuf import text_format
import caffe
try:
import caffe_pb2
except ImportError:
# See issue #32
from caffe.proto import caffe_pb2
from train import TrainTask
from digits.config import config_value
from digits.status import Status
from digits import utils, dataset
from digits.utils import subclass, override, constants
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2
@subclass
class CaffeTrainTask(TrainTask):
"""
Trains a caffe model
"""
CAFFE_LOG = 'caffe_output.log'
@staticmethod
def upgrade_network(network):
#TODO
pass
def __init__(self, network, **kwargs):
"""
Arguments:
network -- a caffe NetParameter defining the network
"""
super(CaffeTrainTask, self).__init__(**kwargs)
self.pickver_task_caffe_train = PICKLE_VERSION
self.network = network
self.current_iteration = 0
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
self.image_mean = None
self.solver = None
self.solver_file = constants.CAFFE_SOLVER_FILE
self.train_val_file = constants.CAFFE_TRAIN_VAL_FILE
self.snapshot_prefix = constants.CAFFE_SNAPSHOT_PREFIX
self.deploy_file = constants.CAFFE_DEPLOY_FILE
self.caffe_log_file = self.CAFFE_LOG
def __getstate__(self):
state = super(CaffeTrainTask, self).__getstate__()
# Don't pickle these things
if 'caffe_log' in state:
del state['caffe_log']
if '_transformer' in state:
del state['_transformer']
if '_caffe_net' in state:
del state['_caffe_net']
return state
def __setstate__(self, state):
super(CaffeTrainTask, self).__setstate__(state)
# Upgrade pickle file
if state['pickver_task_caffe_train'] == 1:
print 'upgrading %s' % self.job_id
self.caffe_log_file = self.CAFFE_LOG
self.pickver_task_caffe_train = PICKLE_VERSION
# Make changes to self
self.loaded_snapshot_file = None
self.loaded_snapshot_epoch = None
# These things don't get pickled
self.image_mean = None
### Task overrides
@override
def name(self):
return 'Train Caffe Model'
@override
def before_run(self):
super(CaffeTrainTask, self).before_run()
if isinstance(self.dataset, dataset.ImageClassificationDatasetJob):
self.save_files_classification()
elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
self.save_files_generic()
else:
raise NotImplementedError
self.caffe_log = open(self.path(self.CAFFE_LOG), 'a')
self.saving_snapshot = False
self.receiving_train_output = False
self.receiving_val_output = False
self.last_train_update = None
return True
# TODO merge these monolithic save_files functions
# TODO break them up into separate functions
def save_files_classification(self):
"""
Save solver, train_val and deploy files to disk
"""
has_val_set = self.dataset.val_db_task() is not None
### Check what has been specified in self.network
tops = []
bottoms = {}
train_data_layer = None
val_data_layer = None
hidden_layers = caffe_pb2.NetParameter()
loss_layers = []
accuracy_layers = []
for layer in self.network.layer:
assert layer.type not in ['MemoryData', 'HDF5Data', 'ImageData'], 'unsupported data layer type'
if layer.type == 'Data':
for rule in layer.include:
if rule.phase == caffe_pb2.TRAIN:
assert train_data_layer is None, 'cannot specify two train data layers'
train_data_layer = layer
elif rule.phase == caffe_pb2.TEST:
assert val_data_layer is None, 'cannot specify two test data layers'
val_data_layer = layer
elif layer.type == 'SoftmaxWithLoss':
loss_layers.append(layer)
elif layer.type == 'Accuracy':
addThis = True
if layer.accuracy_param.HasField('top_k'):
if layer.accuracy_param.top_k >= len(self.get_labels()):
                        self.logger.warning('Removing layer %s because top_k=%s while there are only %s labels in this dataset' % (layer.name, layer.accuracy_param.top_k, len(self.get_labels())))
addThis = False
if addThis:
accuracy_layers.append(layer)
else:
hidden_layers.layer.add().CopyFrom(layer)
if len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]:
pass
else:
for top in layer.top:
tops.append(top)
for bottom in layer.bottom:
bottoms[bottom] = True
if train_data_layer is None:
assert val_data_layer is None, 'cannot specify a test data layer without a train data layer'
assert len(loss_layers) > 0, 'must specify a loss layer'
network_outputs = []
        for name in tops:
if name not in bottoms:
network_outputs.append(name)
assert len(network_outputs), 'network must have an output'
# Update num_output for any output InnerProduct layers automatically
for layer in hidden_layers.layer:
if layer.type == 'InnerProduct':
for top in layer.top:
if top in network_outputs:
                        layer.inner_product_param.num_output = len(self.get_labels())
break
### Write train_val file
train_val_network = caffe_pb2.NetParameter()
# data layers
if train_data_layer is not None:
if train_data_layer.HasField('data_param'):
assert not train_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not train_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
max_crop_size = min(self.dataset.image_dims[0], self.dataset.image_dims[1])
if self.crop_size:
assert self.crop_size <= max_crop_size, 'crop_size is larger than the image size'
train_data_layer.transform_param.crop_size = self.crop_size
elif train_data_layer.transform_param.HasField('crop_size'):
cs = train_data_layer.transform_param.crop_size
if cs > max_crop_size:
# don't throw an error here
cs = max_crop_size
train_data_layer.transform_param.crop_size = cs
self.crop_size = cs
train_val_network.layer.add().CopyFrom(train_data_layer)
train_data_layer = train_val_network.layer[-1]
if val_data_layer is not None and has_val_set:
if val_data_layer.HasField('data_param'):
assert not val_data_layer.data_param.HasField('source'), "don't set the data_param.source"
assert not val_data_layer.data_param.HasField('backend'), "don't set the data_param.backend"
if self.crop_size:
# use our error checking from the train layer
val_data_layer.transform_param.crop_size = self.crop_size
train_val_network.layer.add().CopyFrom(val_data_layer)
val_data_layer = train_val_network.layer[-1]
else:
train_data_layer = train_val_network.layer.add(type = 'Data', name = 'data')
train_data_layer.top.append('data')
train_data_layer.top.append('label')
train_data_layer
|
daveinnyc/various
|
project_euler/036.double_palindromes.py
|
Python
|
mit
| 761
| 0.005256
|
'''
Problem 036
The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in
base 10 and base 2.
(Please note that the palindromic number, in either base, may not include
leading zeros.)
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
def is_palindrome(number):
if str(number) == str(number)[::-1]:
return True
return False
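# Worked check of the example from the problem statement (a quick sanity
# sketch, not part of the original solution): str(585) == "585" reads the same
# reversed, and format(585, 'b') == "1001001001", which is also a palindrome,
# so 585 is counted by solve_problem().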
def solve_problem(limit):
palindromes = []
for n in range(1, limit):
if is_palindrome(n):
            if is_palindrome(format(n, 'b')):
palindromes.append(n)
return(sum(palindromes))
if __name__ == "__main__":
limit = 1000000
print(solve_problem(limit))
|
ngoduykhanh/PowerDNS-Admin
|
run.py
|
Python
|
mit
| 224
| 0.013393
|
#!/usr/bin/env python3
from powerdnsadmin import create_app
if __name__ == '__main__':
app = create_app()
    app.run(debug = True, host=app.config.get('BIND_ADDRESS', '127.0.0.1'), port=app.config.get('PORT', '9191'))
|
fengbohello/practice
|
python/dict/fromkeys.py
|
Python
|
lgpl-3.0
| 648
| 0.016975
|
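# Demonstrates dict.fromkeys(seq[, value]): it builds a brand-new dict whose
# keys are taken from `seq` and whose values all share the single `value`
# argument (None when omitted); the original dict's values are never copied.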
dc = {'a' : 'a-ele', 'b' : 'b-ele', 'c' : 'c-ele'}
print "id(dc) = ["+ str(id(dc)) +"] dict is : " + str(dc)
print "
|
========================"
x = dc.fromkeys(dc, 'x-ele')
print "type of dc.fromkeys(dc, 'x-ele') = [" + str(type(x)) + "]"
print x
print "========================"
x = dict.fromkeys(dc, 'dict-ele')
print "type of dict.fromkeys(dc, 'x-ele') = [" + str(type(x)) + "]"
print "id(x) = ["+ str(id(x)) +"], x = ["+ str(x) +"]"
print "========================"
x = dc.fromkeys(dc)
print "type of dc.fromkeys(dc) = [" +
|
str(type(x)) + "]"
print x
print "========================"
print "id(dc) = ["+ str(id(dc)) +"] dict is : " + str(dc)
|
uclouvain/osis
|
base/tests/factories/entity_version.py
|
Python
|
agpl-3.0
| 2,480
| 0.001614
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import datetime
import itertools
import string
import factory
from base.models.enums import entity_type, organization_type
from base.tests.factories.entity import EntityFactory
def generate_acronyms():
acronyms_letters_generator = itertools.permutations(string.ascii_uppercase, r=4)
for acronym_letters in acronyms_letters_generator:
yield "".join(acronym_letters)
class EntityVersionFactory(factory.DjangoModelFactory):
    class Meta:
model = 'base.EntityVersion'
entity = factory.SubFactory(EntityFactory)
title = factory.Faker('company')
    acronym = factory.Iterator(generate_acronyms())
entity_type = factory.Iterator(entity_type.ENTITY_TYPES, getter=lambda c: c[0])
parent = factory.SubFactory(EntityFactory)
start_date = datetime.date(2015, 1, 1).isoformat()
end_date = None
class Params:
sector = factory.Trait(entity_type=entity_type.SECTOR)
faculty = factory.Trait(entity_type=entity_type.FACULTY)
class MainEntityVersionFactory(EntityVersionFactory):
entity = factory.SubFactory(EntityFactory, organization__type=organization_type.MAIN)
entity_type = factory.Iterator(entity_type.PEDAGOGICAL_ENTITY_TYPES)
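# Illustrative use of the traits declared in Params above (a sketch; it assumes
# a configured Django test database, as any DjangoModelFactory does):
#   faculty_version = EntityVersionFactory(faculty=True)   # entity_type == FACULTY
#   sector_version = EntityVersionFactory(sector=True)     # entity_type == SECTOR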
|
jdp/urp
|
urp.py
|
Python
|
mit
| 7,836
| 0.004977
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import itertools
import os
import sys
try:
from urllib import quote_plus, urlencode
from urlparse import parse_qsl, urlparse, urlunparse
except ImportError:
from urllib.parse import parse_qsl, quote_plus, urlencode, urlparse, urlunparse
ERR_INVALID_PAIR = 3
def parse(args, data):
url = urlparse(data)
query = url.query
if not args.no_query_params:
query = parse_qsl(url.query)
return url, query
def build_authority(username, password, hostname, port):
netloc = hostname
if username or password:
auth = username + ':' + password
netloc = auth + '@' + netloc
if port:
netloc += ':' + port
return netloc
def process(args, url, query):
scheme = args.scheme or url.scheme
username = args.username or (url.username or '')
password = args.password or (url.password or '')
hostname = args.hostname or (url.hostname or '')
port = str(args.port or (url.port or ''))
params = args.params or url.params
fragment = args.fragment or url.fragment
authority = build_authority(username, password, hostname, port)
path = url.path
if args.path:
if args.path.startswith('/'):
path = args.path
else:
path = os.path.join(url.path, args.path)
path = os.path.normpath(path)
if args.no_query_params:
if args.query:
query = args.query
if args.queries:
query += ''.join(args.queries)
if args.no_url_encoding:
encoded_query = query
else:
encoded_query = quote_plus(query)
else:
if args.query:
query = parse_qsl(args.query)
if args.queries:
query.extend(p.split('=', 2) for p in args.queries)
query = [(q, v) for q, v in query if q not in args.ignored_queries]
if args.sort_query:
query = sorted(query, key=lambda p: p[0])
if args.no_url_encoding:
encoded_query = '&'.join('='.join(p) for p in query)
else:
encoded_query = urlencode(query)
suppress_default = False
if args.print_scheme:
suppress_default = True
yield scheme
if args.print_username:
suppress_default = True
yield username
if args.print_password:
suppress_default = True
yield password
if args.print_hostname:
suppress_default = True
yield hostname
if args.print_port:
suppress_default = True
yield port
if args.print_authority:
suppress_default = True
yield authority
if args.print_path:
suppress_default = True
yield path
if args.print_params:
suppress_default = True
yield params
if args.print_query:
suppress_default = True
yield encoded_query
    if args.query_value and not args.no_query_params:
suppress_default = True
        # Would be nice to make `query_map` a defaultdict, but that would
# restrict this program to newer Python versions.
query_map = {}
for q, v in query:
if q not in query_map:
query_map[q] = []
query_map[q].append(v)
for q in args.query_value:
for v in query_map.get(q, ['']):
yield v
if args.print_query_names and not args.no_query_params:
suppress_default = True
for q in query:
yield q[0]
if args.print_query_values and not args.no_query_params:
suppress_default = True
for q in query:
yield q[1]
if args.print_fragment:
suppress_default = True
yield fragment
if not suppress_default:
yield urlunparse((scheme, authority, path, params, encoded_query, fragment))
def main():
ap = argparse.ArgumentParser(description='extract and modify URL features')
# URL-printing options
ap.add_argument('-s', '--print-scheme', action='store_true', dest='print_scheme', help="print scheme")
ap.add_argument('-u', '--print-username', action='store_true', dest='print_username', help="print username")
ap.add_argument('-w', '--print-password', action='store_true', dest='print_password', help="print password")
ap.add_argument('-o', '--print-hostname', action='store_true', dest='print_hostname', help="print hostname")
ap.add_argument('-p', '--print-port', action='store_true', dest='print_port', help="print port")
ap.add_argument('-a', '--print-authority', action='store_true', dest='print_authority', help="print authority")
ap.add_argument('-d', '--print-path', action='store_true', dest='print_path', help="print path")
ap.add_argument( '--print-params', action='store_true', dest='print_params', help="print params")
ap.add_argument('-q', '--print-query', action='store_true', dest='print_query', help="print query string")
ap.add_argument( '--print-query-names', action='store_true', dest='print_query_names', help="print only query parameter names")
ap.add_argument( '--print-query-values', action='store_true', dest='print_query_values', help="print only query parameter values")
ap.add_argument('-f', '--print-fragment', action='store_true', dest='print_fragment', help="print fragment")
ap.add_argument('-g', '--print-query-value', action='append', metavar='QUERY', dest='query_value', help="print value of query parameter")
# URL-mutating options
ap.add_argument('-S', '--scheme', action='store', dest='scheme', help="set scheme")
ap.add_argument('-U', '--username', action='store', dest='username', help="set username")
ap.add_argument('-W', '--password', action='store', dest='password', help="set password")
ap.add_argument('-O', '--hostname', action='store', dest='hostname', help="set hostname")
ap.add_argument('-P', '--port', action='store', dest='port', help="set port")
ap.add_argument('-D', '--path', action='store', dest='path', help="set or append path")
ap.add_argument( '--params', action='store', dest='params', help="set params")
ap.add_argument( '--query', action='store', dest='query', help="set query")
ap.add_argument('-Q', '--append-query', metavar='NAME=VALUE', action='append', dest='queries', default=[], help="append query parameter")
ap.add_argument('-F', '--fragment', action='store', dest='fragment', help="set fragment")
# Behavior-modifying options
ap.add_argument( '--no-url-encoding', action='store_true', help="disable URL encoding")
ap.add_argument( '--no-query-params', action='store_true', help="disable query parameter parsing")
ap.add_argument( '--sort-query', action='store_true', help="sort printed query parameters by name")
ap.add_argument('-x', '--ignore-query', action='append', dest='ignored_queries', metavar='QUERY', default=[], help="ignore query parameter")
ap.add_argument( '--version', action='version', version='%(prog)s 0.1.1')
# Positional arguments
ap.add_argument('urls', nargs='*', metavar='URL')
args = ap.parse_args()
for pair in args.queries:
if '=' not in pair:
sys.stderr.write("invalid name=value pair: {}\n".format(pair))
sys.exit(ERR_INVALID_PAIR)
# Use the field and record separators from the environment
ofs = os.environ.get('OFS', ' ')
rs = os.environ.get('RS', '\n')
inputs = []
if not sys.stdin.isatty():
inputs.append(sys.stdin)
inputs.append(args.urls)
for line in itertools.chain(*inputs):
url, query = parse(args, line.strip())
output = process(args, url, query)
sys.stdout.write(ofs.join(output))
sys.stdout.write(rs)
if __name__ == '__main__':
main()
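# Example invocations (a sketch based on the options defined above; OFS and RS
# default to a single space and a newline):
#   $ echo 'https://alice:secret@example.com:8080/a/b?x=1&y=2' | python urp.py -o -p
#   example.com 8080
#   $ python urp.py -Q 'z=3' --sort-query 'http://example.com/?y=2&x=1'
#   http://example.com/?x=1&y=2&z=3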
|
skycucumber/Messaging-Gateway
|
webapp/venv/lib/python2.7/site-packages/twisted/test/test_threadable.py
|
Python
|
gpl-2.0
| 3,760
| 0.002926
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.threadable}.
"""
from __future__ import division, absolute_import
import sys, pickle
try:
import threading
except ImportError:
threadingSkip = "Platform lacks thread support"
else:
threadingSkip = None
from twisted.python.compat import _PY3
from twisted.trial import unittest
from twisted.python import threadable
class TestObject:
synchronized = ['aMethod']
x = -1
y = 1
def aMethod(self):
for i in range(10):
self.x, self.y = self.y, self.x
self.z = self.x + self.y
assert self.z == 0, "z == %d, not 0 as expected" % (self.z,)
threadable.synchronize(TestObject)
class SynchronizationTestCase(unittest.SynchronousTestCase):
def setUp(self):
"""
Reduce the CPython check interval so that thread switches happen much
more often, hopefully exercising more possible race conditions. Also,
delay actual test startup until the reactor has been started.
"""
if _PY3:
if getattr(sys, 'getswitchinterval', None) is not None:
self.addCleanup(sys.setswitchinterval, sys.getswitchinterval())
sys.setswitchinterval(0.0000001)
else:
if getattr(sys, 'getcheckinterval', None) is not None:
self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
sys.setcheckinterval(7)
def test_synchronizedName(self):
"""
        The name of a synchronized method is unaffected by the synchronization
decorator.
"""
self.assertEqual("aMethod", TestObject.aMethod.__name__)
def test_isInIOThread(self):
"""
L{threadable.isInIOThread} returns C{True} if and only if it is called
in the same thread as L{threadable.registerAsIOThread}.
"""
threadable.registerAsIOThread()
foreignResult = []
t = threading.Thread(
target=lambda: foreignResult.append(threadable.isInIOThread()))
t.start()
t.join()
self.assertFalse(
foreignResult[0], "Non-IO thread reported as IO thread")
self.assertTrue(
threadable.isInIOThread(), "IO thread reported as not IO thread")
def testThreadedSynchronization(self):
o = TestObject()
        errors = []
def callMethodLots():
try:
for i in range(1000):
o.aMethod()
except AssertionError as e:
errors.append(str(e))
threads = []
        for x in range(5):
            t = threading.Thread(target=callMethodLots)
threads.append(t)
t.start()
for t in threads:
t.join()
if errors:
raise unittest.FailTest(errors)
if threadingSkip is not None:
testThreadedSynchronization.skip = threadingSkip
test_isInIOThread.skip = threadingSkip
def testUnthreadedSynchronization(self):
o = TestObject()
for i in range(1000):
o.aMethod()
class SerializationTestCase(unittest.SynchronousTestCase):
def testPickling(self):
lock = threadable.XLock()
lockType = type(lock)
lockPickle = pickle.dumps(lock)
newLock = pickle.loads(lockPickle)
self.assertTrue(isinstance(newLock, lockType))
if threadingSkip is not None:
testPickling.skip = threadingSkip
def testUnpickling(self):
lockPickle = b'ctwisted.python.threadable\nunpickle_lock\np0\n(tp1\nRp2\n.'
lock = pickle.loads(lockPickle)
newPickle = pickle.dumps(lock, 2)
newLock = pickle.loads(newPickle)
|
gantzgraf/vape
|
test/test_sample_filters.py
|
Python
|
gpl-3.0
| 6,155
| 0.0013
|
from .utils import *
vep_and_snpeff_inputs = [(input_prefix + '.vcf.gz', False),
(input_prefix + '.snpeff.vcf.gz', True)]
def test_case_control():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
cases=['Sample3', 'Sample2'],
controls=['Sample1'],
het_ab=0.005,
gq=20,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo2():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
max_alt_alleles=1,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo3():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
het_ab=0.25,
max_alt_alleles=1,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_de_novo_no_csq():
output = get_tmp_out()
test_args = dict(
input=os.path.join(dir_path, 'test_data', 'ex9.vcf.gz'),
ped=os.path.join(dir_path, "test_data", "test.ped"),
de_novo=True,
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
biallelic=True,
csq=[],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic2():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
biallelic=True,
impact=['HIGH', 'MODERATE'],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic3():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test.ped"),
biallelic=True,
impact=['HIGH'],
output=output,
)
results, expected = run_args(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
def test_biallelic_no_ped():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
singleton_recessive=['Sample1'],
seg_controls=['Sample2', 'Sample3'],
csq=[],
output=output,
)
results, expected = run_args(test_args, output, "test_biallelic")
assert_equal(results, expected)
os.remove(output)
def test_biallelic_seg_control():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test3.ped"),
singleton_recessive=['Sample1'],
seg_controls=['Sample2', 'Sample3'],
csq=[],
output=output,
)
assert_raises(ValueError, run_args, test_args)
test_args = dict(
ped=os.path.join(dir_path, "test_data", "test3.ped"),
biallelic=True,
seg_controls=['Sample2', 'Sample3'],
csq=[],
output=output,
)
results, expected = run_args(test_args, output, "test_biallelic")
assert_equal(results, expected)
os.remove(output)
def test_dominant():
for vcf, snpeff in vep_and_snpeff_inputs:
output = get_tmp_out()
test_args = dict(
input=vcf,
snpeff=snpeff,
ped=os.path.join(dir_path, "test_data", "test2.ped"),
dominant=True,
csq=[],
output=output,
)
        results, expected = run_args(test_args, output,
                                     sys._getframe().f_code.co_name)
        assert_equal(results, expected)
os.remove(output)
if __name__ == '__main__':
import nose
nose.run(defaultTest=__name__)
|
LibreHealthIO/community-infra
|
ansible/files/monitoring/ssl_check_expire_days.py
|
Python
|
mpl-2.0
| 735
| 0.005442
|
import time
import datetime
from OpenSSL import crypto as c
from checks import AgentCheck
class SSLCheckExpireDays(AgentCheck):
def check(self, instance):
metric = "ssl.expire_in_days"
certfile = instance['cert']
        cert_tag = 'cert:%s' % (certfile.split('/')[-1],)
date_format = "%Y%m%d%H%M%SZ"
cert = c.load_certificate(c.FILETYPE_PEM, file(certfile).read())
output = cert.get_notAfter()
if output:
            d0 = datetime.datetime.today()
d1 = datetime.datetime(*(time.strptime(output, date_format)[0:3]))
delta = d1 - d0
            self.gauge(metric, int(delta.days), tags=[cert_tag])
else:
self.gauge(metric, -1, tags=[cert_tag])
|
legaultmarc/grstools
|
grstools/scripts/mendelian_randomization.py
|
Python
|
mit
| 3,401
| 0
|
"""
Compute the GRS from genotypes and a GRS file.
"""
# This file is part of grstools.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Marc-Andre Legault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import argparse
from .evaluate import _add_phenotype_arguments
from ..utils import mr_effect_estimate, _create_genetest_phenotypes
logger = logging.getLogger(__name__)
def main():
args = parse_args()
phenotypes = _create_genetest_phenotypes(
        args.grs_filename,
        args.phenotypes_filename,
args.phenotypes_sample_column, args.phenotypes_separator
)
if args.outcome_type == "continuous":
y_g_test = "linear"
elif args.outcome_type == "discrete":
y_g_test = "logistic"
else:
raise ValueError(
"Expected outcome type to be 'discrete' or 'continuous'."
)
if args.exposure_type == "continuous":
x_g_test = "
|
linear"
elif args.exposure_type == "discrete":
x_g_test = "logistic"
else:
raise ValueError(
"Expected exposure type to be 'discrete' or 'continuous'."
)
n_iter = 1000
logger.info(
"Computing MR estimates using the ratio method. Bootstrapping "
"standard errors can take some time."
)
beta, low, high = mr_effect_estimate(
phenotypes, args.outcome, args.exposure, n_iter, y_g_test, x_g_test
)
print("The estimated beta of the exposure on the outcome and its 95% CI "
"(computed using the empirical " "bootstrap) are:\n")
print("{:.4g} ({:.4g}, {:.4g})".format(beta, low, high))
def parse_args():
parser = argparse.ArgumentParser(
description=(
"Estimate the effect of an exposure on an outcome using "
"a GRS with an effect on the exposure.\n"
"Estimates are done using the ratio method."
)
)
parser.add_argument("--grs-filename", type=str)
parser.add_argument("--exposure", type=str)
parser.add_argument("--outcome", type=str)
parser.add_argument(
"--exposure-type", type=str,
help="Either continuous or discrete.",
default="continuous"
)
parser.add_argument(
"--outcome-type", type=str,
help="Either continuous or discrete.",
default="continuous"
)
_add_phenotype_arguments(parser)
return parser.parse_args()
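# For reference, the "ratio method" named above is the Wald ratio estimator.
# A minimal sketch with illustrative names (not part of this module): if
# beta_gx is the GRS->exposure effect and beta_gy the GRS->outcome effect,
# each taken from a separate regression, then
#   beta_iv = beta_gy / beta_gx
# and the 95% CI printed by main() comes from bootstrapping that ratio over
# n_iter resamples.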
|