repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Parsl/parsl | parsl/tests/test_python_apps/test_memoize_1.py | Python | apache-2.0 | 976 | 0 | import argparse
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
@python_app(cache=True)
def random_uuid(x, cache=True):
import uuid
return str(uuid.uuid4())
def test_python_memoization(n=2):
"""Testing python memoization disable
"""
x = random_uuid(0)
print(x.result())
for i in range(0, n):
foo = random_uuid(0)
print(foo.result())
assert foo.result() == x.result(), "Memoized results were not used"
if __name__ == '__mai | n__':
parsl.clear()
parsl.load(config)
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default="10",
help="Count of apps to launch")
parser.add_argument("-d", "--debug", action='store_true',
help="Count of apps to launch")
args = parser.parse_args()
if args.debug:
parsl.set_ | stream_logger()
x = test_python_memoization(n=4)
|
jiquintana/proxy-final | db_model/db_layer.py | Python | gpl-3.0 | 30,743 | 0.009791 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: ts=4:sw=4:sts=4:ai:et:fileencoding=utf-8:number
import sys
if sys.version_info < (3, 0):
python_OldVersion = True
else:
python_OldVersion = False
import pprint
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean, Table, or_, CHAR, Enum
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
from sqlalchemy import create_engine, MetaData, event
import datetime
if python_OldVersion:
import string
else:
import binascii
TraceSQL = True
MAXUSERS = 1024
MAXGROUPS = 65536
DRIVER = 'sqlite:///proxy.db'
#DRIVER = 'sqlite://'
engine = create_engine(DRIVER, echo=TraceSQL)
Base = declarative_base()
#Base.metadata.bind = engine
#metadata = MetaData()
HOURS_IDX = {
0: 'H00_M', 1: 'H01_M', 2: 'H02_M', 3: 'H03_M',
4: 'H04_M', 5: 'H05_M', 6: 'H06_M', 7: 'H07_M',
8: 'H08_M', 9: 'H09_M', 10: 'H10_M', 11: 'H11_M',
12: 'H12_M', 13: 'H13_M', 14: 'H14_M', 15: 'H15_M',
16: 'H16_M', 17: 'H17_M', 18: 'H18_M', 19: 'H19_M',
20: 'H20_M', 21: 'H21_M', 22: 'H22_M', 23: 'H23_M'
}
HOURS_MASK = {
# 2 1 0
# 321098765432109876543210
'NON_M' : 0b000000000000000000000000, # NONE ASIGNED
'H00_M' : 0b000000000000000000000001, # 00.xxh mask
'H01_M' : 0b000000000000000000000010, # 01.xxh mask
'H02_M' : 0b000000000000000000000100, # 02.xxh mask
'H03_M' : 0b000000000000000000001000, # 03.xxh mask
'H04_M' : 0b000000000000000000010000, # 04.xxh mask
'H05_M' : 0b000000000000000000100000, # 05.xxh mask
'H06_M' : 0b000000000000000001000000, # 06.xxh mask
'H07_M' : 0b0000000000000000100 | 00000, # 07. | xxh mask
'H08_M' : 0b000000000000000100000000, # 08.xxh mask
'H09_M' : 0b000000000000001000000000, # 09.xxh mask
'H10_M' : 0b000000000000010000000000, # 10.xxh mask
'H11_M' : 0b000000000000100000000000, # 11.xxh mask
'H12_M' : 0b000000000001000000000000, # 12.xxh mask
'H13_M' : 0b000000000010000000000000, # 13.xxh mask
'H14_M' : 0b000000000100000000000000, # 14.xxh mask
'H15_M' : 0b000000001000000000000000, # 15.xxh mask
'H16_M' : 0b000000010000000000000000, # 16.xxh mask
'H17_M' : 0b000000100000000000000000, # 17.xxh mask
'H18_M' : 0b000001000000000000000000, # 18.xxh mask
'H19_M' : 0b000010000000000000000000, # 19.xxh mask
'H20_M' : 0b000100000000000000000000, # 20.xxh mask
'H21_M' : 0b001000000000000000000000, # 21.xxh mask
'H22_M' : 0b010000000000000000000000, # 22.xxh mask
'H23_M' : 0b100000000000000000000000, # 23.xxh mask
'NIG_M' : 0b111000000000000011111111, # 00.xxh - 07.xxh && 21.xxh-23.xxh mask
'DAY_M' : 0b000111111111111100000000, # 08.xxh - 20.xxh mask
'ALL_M' : 0b111111111111111111111111 # ALL hours mask
}
def __bitwise_not_hours(hours):
return hours ^ 0xFFFFFF
@event.listens_for(engine, "connect")
def _fk_pragma_on_connect(dbapi_con, con_record):
dbapi_con.execute('PRAGMA journal_mode=MEMORY')
class Singleton(object):
__instance = None
def __new__(cls):
if Singleton.__instance is None:
Singleton.__instance = object.__new__(cls)
return Singleton.__instance
class Database(Singleton):
__initialized__ = False
__engine__ = None
__DBSession__ = None
__BASE__ = None
session = None
def __init__(self):
if not self.__initialized__:
print("not initialized")
self.__initialized__ = True
self.__engine__ = engine
self.__metadata__ = MetaData()
self.__BASE__=Base
self.__BASE__.metadata.bind=self.__engine__
self.__BASE__.metadata.create_all(self.__engine__)
self.__DBSession__ = scoped_session(sessionmaker())
self.__DBSession__.configure(bind=self.__engine__)
self.session = self.__DBSession__()
else:
print("already initialized")
def findUser(self, str2find):
users_found = self.session.\
query(User).\
filter( \
or_( User.username==str2find, User.description==str2find )
).\
all()
if users_found == []:
users_found = self.session.\
query(User).\
filter( \
or_( User.username.ilike("%"+str2find+"%"), User.description.ilike("%"+str2find+"%"))
).\
all()
return users_found
def findUserByUsername(self, username):
users_found = self.session.\
query(User).\
filter(User.username==username).\
first()
return users_found
def findUserByUID(self, uid):
users_found = self.session.\
query(User).\
filter(User.uid==uid).\
first()
return users_found
def getAllUser(self):
users_found = self.session.\
query(User).\
all()
return users_found
def getLowestUnusedUIDfromUser(self):
theUID=None
uids_found = [r for (r, ) in self.session.query(User.uid).all()]
for testUID in range(1,MAXUSERS+1):
if (theUID == None) and not (testUID in uids_found):
theUID = testUID
return theUID
def addUser(self,newUser):
theUID = None
theGID = -1
transaction_succesful=False
# Obtenemos el UID libre mas bajo; el uid no supera el máximo de MAXUSERS usrs
theUID = self.getLowestUnusedUIDfromUser()
# Comprobamos que el grupo no exista, y si esta libre, creamos el UID y GID con el mismo valor:
# Correspondencia 1:1 entre usuario y grupo cuando el UID<MAXUSERS
if (theUID != None) and (self.findGroupByGID(theUID) == None):
theGID = theUID
testUSR = self.findUserByUsername(newUser.username)
testGRP = self.findGroupByGroupname('dfl_grp_'+newUser.username)
# Comprobamos si ya existe un usuario o grupo con igual clave primaria
if (testUSR != None) or (testGRP != None):
# Existe: forzamos NO ejecucion...
theGID = -1
# Si hemos encontrado un usuario libre y el grupo libre...
# y ademas no existen las claves primarias de usuario.username y grupo.groupname...
# => (todo OK hasta el momento...)
# en la inicializacion hemos forzado valores diferentes para evitar que esta comparacion sea
# cierta por defecto
if theUID == theGID:
# Todos los valores recibidos en newUser, salvo el UID, son validos. Ahora alocamos el UID
newUser.uid = theUID
newGroup = Group()
newGroup.gid=theGID
newGroup.description='Grupo de '+newUser.description
newGroup.groupname='dfl_grp_'+newUser.username
relacionUsrGrp = Groups(newUser, newGroup)
# Tenemos todo preparado... intentamos ejecutar la transaccion
try:
#self.session.begin_nested()
self.session.add(newUser)
self.session.add(newGroup)
self.session.add(relacionUsrGrp)
self.session.commit()
transaction_succesful=True
except e:
print(e)
self.session.rollback()
# Si todo ha ido bien (creacion del usuario, creacion del grupo y creacion de acl), devolvemos
# un registro con el usuario creado
# si ha ido mal, devolvemos None
if transaction_succesful:
storedUser = self.findUserByUID(theUID)
else:
storedUser = None
return storedUser
def setUserAdmin(self,requestedUser):
print(requestedUser.__repr__())
if requestedUser != None:
storedUser=self.findUserByUsername(requestedUs |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/wcs/tests/extension/test_extension.py | Python | mit | 2,891 | 0.001038 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicod | e_literals
import os
import subprocess
import sys
from ....tests.helper import pytest
def test_wcsapi_extension(tmpdir):
# Test that we can build a simple C extension with the astropy.wcs C API
setup_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(
os.path.join(setup_path, '..', '..', '..', '..'))
env = os.environ.copy()
paths = [str(tmpdir), astropy_path]
if env.get('PYTHONPATH'):
paths.append(env.get('PYTHONPATH'))
env[str('PY | THONPATH')] = str(os.pathsep.join(paths))
# Build the extension
# This used to use subprocess.check_call, but on Python 3.4 there was
# a mysterious Heisenbug causing this to fail with a non-zero exit code
# *unless* the output is redirected. This bug also did not occur in an
# interactive session, so it likely had something to do with pytest's
# output capture
p = subprocess.Popen([sys.executable, 'setup.py', 'install',
'--install-lib={0}'.format(tmpdir),
astropy_path], cwd=setup_path, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Whether the process fails or not this isn't likely to produce a great
# deal of output so communicate should be fine in almost all cases
stdout, stderr = p.communicate()
try:
stdout, stderr = stdout.decode('utf8'), stderr.decode('utf8')
except UnicodeDecodeError:
# Don't try to guess about encoding; just display the text
stdout, stderr = stdout.decode('latin1'), stderr.decode('latin1')
# If compilation fails, we can skip this test, since the
# dependencies necessary to compile an extension may be missing.
# If it passes, however, we want to continue and ensure that the
# extension created is actually usable. However, if we're on
# Travis-CI, or another generic continuous integration setup, we
# don't want to ever skip, because having it fail in that
# environment probably indicates something more serious that we
# want to know about.
if (not (str('CI') in os.environ or
str('TRAVIS') in os.environ or
str('CONTINUOUS_INTEGRATION') in os.environ) and
p.returncode):
pytest.skip("system unable to compile extensions")
return
assert p.returncode == 0, (
"setup.py exited with non-zero return code {0}\n"
"stdout:\n\n{1}\n\nstderr:\n\n{2}\n".format(
p.returncode, stdout, stderr))
code = """
import sys
import wcsapi_test
sys.exit(wcsapi_test.test())
"""
code = code.strip().replace('\n', '; ')
# Import and run the extension
subprocess.check_call([sys.executable, '-c', code], env=env)
|
elijah74/django-url-shortener | base/conf/urls/all.py | Python | bsd-3-clause | 915 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""shortener URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django | .contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^shortener/', include('shortener.urls', namespace='short | ener')),
]
|
koodaamo/asynciohelpers | asynciohelpers/service.py | Python | gpl-3.0 | 6,449 | 0.021244 | from contextlib import suppress
import asyncio
from concurrent.futures import CancelledError
from .exceptions import SetupException
class AsyncioRunning:
"base runner class that sets up and runs the loop & payload"
RESTART_DELAY = 15 # seconds until waiter & runner are restarted
# these three required per the ABC
_host = None
_port = None
_ssl = None
_loop = None
_external_loop = False
def set_loop(self, loop):
self._loop = loop
self._external_loop = True
def _on_wait_completed(self, *args):
"also stop the runner when waiting is complete"
self._closing = True
try:
self._run_task.cancel()
except:
# it's possible the runnables have not been created yet...
pass
def start(self):
self._loop = self._loop or asyncio.get_event_loop()
self._logger.debug("%s start requested" % self.__class__.__name__)
self._closing = False
self._started = asyncio.Future(loop=self._loop)
# start the runner coro after setup is done, or exit if error
self._wait_task = self._loop.create_task(self._wait())
self._wait_task.add_done_callback(self._on_wait_completed)
def setup_complete(future):
exc = future.exception()
if exc:
self._logger.error("setup failed, stopping immediately: %s" % str(exc))
setup_failed = SetupException("failure: %s" % str(exc))
self._started.set_exception(setup_failed)
else:
self._logger.debug("setup completed ok, scheduling runner")
self._run_task = self._loop.create_task(self._run())
self._started.set_result(True)
self._setup_task = self._loop.create_task(self._setup())
self._setup_task.add_done_callback(setup_complete)
if self._external_loop:
return self._started
# run the setup
try:
self._loop.run_until_complete(self._setup_task)
except Exception as exc:
self._wait_task.cancel()
self._loop.call_soon_threadsafe(self._loop.stop)
# run the main loop; we can be stopped either by waiter completing & canceling
# runner or by stop(), which actually just cancels the waiter
while not self._closing:
try:
self._loop.run_until_complete(self._run_task)
except CancelledError:
self._logger.info("runner cancelled")
break
except KeyboardInterrupt:
self._logger.info("runner terminated by user action")
self._wait_task.cancel()
break
except Exception as exc:
self._logger.error("runner failure: %s" % str(exc))
self._delaying = asyncio.sleep(self.RESTART_DELAY, loop=self._loop)
self._loop.run_until_complete(self._delaying)
if not self._closing:
self._run_task = self._loop.create_task(self._run())
self._teardown_task = self._loop.create_task(self._teardown())
try:
self._loop.run_until_complete(self._teardown_task)
except Exception as exc:
self._logger.warn("teardown problem: %s" % exc)
remaining = asyncio.Task.all_tasks()
for task in remaining:
task.cancel()
try:
self._loop.run_until_complete(asyncio.gather(*remaining))
except CancelledError:
pass
except Exception as exc:
self._logger.warn("cleanup failure: %s" % exc)
try:
self._loop.call_soon_threadsafe(self._loop.stop)
except Exception as exc:
self._logger.warn(str(exc))
self._loop.close()
self._logger.info("%s is now shut down" % self.__class__.__name__)
def stop(self, *args, **kwargs):
"cancel the task and return the stop future if external loop"
self._logger.info("stop requested")
self._wait_task.cancel()
if self._external_loop:
self._teardown_task = self._loop.create_task(self._teardown())
return self._teardown_task
class AsyncioConnecting(AsyncioRunning):
"asyncio service that connects a given transport"
_transport_factory = None # protocol class, or other instance factory
_host = None # FQDN
_port = None # int
_ssl = False # bool
async def _connect(self):
args = (self._transport_factory,)
kwargs = {"host": self._host, "port": self._port, "ssl": self._ssl}
connector = self._loop.create_connection(*args, **kwargs)
(self._transport, self._protocol) = await connector
self._protocol.is_closed = asyncio.Future(loop=self._loop)
self._logger.debug("connected transport (%s)" % self._transport.__class__.__name__)
async def _setup(self):
self._logger.debug("connecting")
await self._connect()
async def _teardown(self):
self._logger.debug("closing transport")
try:
self._transport.close()
except Exception as exc:
self._logger.warn("cannot close transport: %s" % exc)
# protocol implementation MUST set this:
await self._protocol.is_closed
class AsyncioReConnecting(AsyncioConnecting):
"asyncio service that connects a given transport"
RECONNECT_DELAY = 5 # seconds
async def _connect(self):
"after super() setup has run, register a protocol close (re)connect callback"
await asyncio.sleep(self.RECONNECT_DELAY, loop=self._loop)
while not self._closing:
self._logger.debug("attempting connect")
try:
result = await super()._connect()
except ConnectionError as exc:
self._logger.warn("connection refused, retrying: %s" % str(exc))
except CancelledError as exc:
self._logger.error("connect cancelled, stopping: %s" % str(exc))
break
except SetupException as exc:
self._logger.error("setup failed: %s" % exc)
break
except Exception as exc:
self._logger.error("unhandled error %s: % | s" % | (type(exc), exc))
else:
self._protocol.is_closed.add_done_callback(self._reconnect)
break
await asyncio.sleep(self.RECONNECT_DELAY, loop=self._loop)
def _reconnect(self, future):
if not self._closing:
self._logger.warn("connection lost, reconnecting")
reconnect = self._loop.create_task(self._connect())
else:
self._logger.warn("already closing, not reconnecting")
|
sialm/par_king | client/ParKingClient.py | Python | mit | 11,117 | 0.004408 | # from i2clibraries import i2c_hmc58831
from socket import socket
from socket import AF_INET
from socket import SOCK_STREAM
from socket import error as socket_error
from time import sleep
from time import time
from struct import pack
from datetime import datetime
from threading import Thread
import config
import ParKingPacket
from i2clibraries import i2c_hmc5883l
import RPi.GPIO as GPIO # import RPi.GPIO module
class ParKingClient:
THRESHOLD = 125
LOWER_THRESHOLD = 10
TIME_FORMAT_STRING = '%Y-%m-%d %H:%M:%S'
#######################################################################################################################
# SETUP METHODS
#######################################################################################################################
def __init__(self, service_port, host_ip, spots_available, data_log_mode=False):
'''
This will create a ParKingClient
:param service_port:
:param host_ip:
:param data_log_mode:
:return:
'''
self.data_log_mode = data_log_mode
if self.data_log_mode:
self.log_file = self.create_logs()
self.data_file = self.create_data_file()
else:
self.log_file = None
self.data_file = None
self.index_for_csv = 1
self.host_ip = host_ip
self.service_port = service_port
self.running = False
self.sock = socket(AF_INET, SOCK_STREAM)
self.connect()
self.send_init_packet(spots_available)
alive_thread = Thread(target=self.keep_alive, args=())
alive_thread.daemon = True
alive_thread.start()
GPIO.setmode(GPIO.BCM)
self.write_to_log('creating sensor 1')
self.sensor_1 = i2c_hmc5883l.i2c_hmc5883l(1)
self.sensor_1.setContinuousMode()
self.sensor_1.setDeclination(0,6)
self.write_to_log('sensor one created')
if not config.ONE_SENSOR:
self.write_to_log('creating sensor 2')
self.sensor_2 = i2c_hmc5883l.i2c_hmc5883l(0)
self.sensor_2.setContinuousMode()
self.sensor_2.setDeclination(0,6)
self.write_to_log('sensor two created')
sleep(2)
(x, y, z) = self.read_from_sens | or_1()
self.z_base_line_1 = z
self.last_z_signal_1 = 0
if not config.ONE_SENSOR:
(x, y, z) = self.read_from_sensor_2()
self.z_base_line_2 = z
self.last_z_signal_2 = 0
def create_logs(self):
"""
Create | s a unique log file per session
:return: log file
"""
try:
file_name = 'log_file'
log_file = open(file_name, 'w')
return log_file
except Exception as e:
print('Log file error, shutting down.')
self.tear_down()
def create_data_file(self):
"""
Creates a unique log file per session
:return: log file
"""
try:
file_name = 'data.csv'
data_file = open(file_name, 'w')
return data_file
except Exception as e:
print('data file error, shutting down.')
self.tear_down()
def tear_down(self):
"""l
Called upon exit, this should tear down the existing resources that are not managed by daemons
:return:
"""
GPIO.cleanup()
self.write_to_log('teardown started')
if self.sock:
close_packet = ParKingPacket.pack_close_packet(config.UNIQUE_ID)
self.write_to_log('closing connection with server')
self.sock.sendall(close_packet)
self.write_to_log('closing listening socket')
self.sock.close()
if self.data_file:
self.write_to_log('closing data file')
self.data_file.close()
if self.log_file:
self.write_to_log('closing log file')
self.log_file.close()
def connect(self):
"""
This connects to the server. In the event that it fails to connect it will tear down the ParKingClient
:return:
"""
try:
self.write_to_log('opening socket')
self.sock.connect((self.host_ip, self.service_port))
except socket_error as e:
print('Could not create socket, tearing down.')
self.tear_down()
self.write_to_log('socket opened!')
def read_from_sensor_1(self):
"""
This will pull the value from the sensor. If the sensor is at it's max negative value, it will return None
to avoid this we move it to one plus the max negative value. We then shift it everything up so we don't have to
worry about artifacts while crossing zero.
:return:
"""
(x,y,z) = self.sensor_1.getAxes()
if (z is None):
z = -4095
z = z + 4096
return (x,y,z)
def read_from_sensor_2(self):
"""
This will pull the value from the sensor. If the sensor is at it's max negative value, it will return None
to avoid this we move it to one plus the max negative value. We then shift it everything up so we don't have to
worry about artifacts while crossing zero.
:return:
"""
return (1,1,1)
(x,y,z) = self.sensor_2.getAxes()
if (z is None):
z = -4095
z = z + 4096
return (x,y,z)
#######################################################################################################################
# RUN METHODS
#######################################################################################################################
def run(self):
self.write_to_log('Running')
self.running = True
if config.ONE_SENSOR:
self.run_in_lane()
elif (config.SENSOR_CONFIG is config.TWO_LANE):
goes_in_thread = Thread(target=self.run_in_lane, args=())
goes_in_thread.daemon = True
goes_in_thread.start()
self.run_out_lane()
def run_in_lane(self):
"""
Monitor traffic on one lane.
:return:
"""
self.write_to_log('run_in_lane.')
tripped = False
for i in range(100):
# calibrate sensor
(x,y,z_1) = self.read_from_sensor_1()
self.z_base_line_1 = self.z_base_line_1*.95 + .05*z_1
sleep(0.05)
self.write_to_log('in_lane calibration complete.')
while self.running:
sleep(.5)
(x,y,z_1) = self.read_from_sensor_1()
z_val_1 = abs(z_1 - self.z_base_line_1)
z_max_1 = z_val_1
self.write_to_log('z : ' + str(z_val_1))
self.write_to_data_file(str(z_val_1))
if z_val_1 > self.THRESHOLD:
tripped = True
if z_val_1 < self.LOWER_THRESHOLD:
if tripped:
self.write_to_log('in lane : sending goes ins packet')
t = Thread(target=self.send_goes_in_packet, args=(z_max_1, ))
t.daemon = True
t.start()
tripped = False
else:
self.z_base_line_1 = self.z_base_line_1*.95 + .05*z_1
def run_out_lane(self):
self.write_to_log('run_out_lane.')
for i in range(100):
# calibrate sensor
(x,y,z_2) = self.read_from_sensor_2()
self.z_base_line_2 = self.z_base_line_2*.95 + .05*z_2
sleep(0.05)
self.write_to_log('out_lane calibration complete.')
while self.running:
sleep(0.5)
(x,y,z_2) = self.read_from_sensor_2()
z_val_2 = z_2 - self.z_base_line_2
z_max_2 = z_val_2
while z_val_2 > self.THRESHOLD:
sleep(0.05)
(x,y,z_2) = self.read_from_sensor_2()
z_val_2 = z_2 - self.z_base_line_2
z_max_2 = max(z_val_2, z_max_2)
if z_val_2 < self.THRESHOLD:
|
stvstnfrd/edx-platform | openedx/core/djangoapps/xblock/learning_context/manager.py | Python | agpl-3.0 | 1,895 | 0.001583 | """
Helper methods for working with learning contexts
"""
from edx_django_utils.plugins import PluginManager
from opaque_keys import OpaqueKey
from opaque_keys.edx.keys import LearningContextKey, UsageKeyV2
from openedx.core.djangoapps.xblock.apps import get_xblock_app_config
class LearningContextPluginManager(PluginManager):
"""
Plugin manager that uses stevedore extension points (entry points) to allow
learning contexts to register as plugins.
The key of the learning context must match the CANONICAL_NAMESPACE of its
LearningContextKey
"""
NAMESPACE = 'openedx.learning_context'
_learning_context_cache = {}
def get_learning_context_impl(key):
"""
Given an opaque key, get the implementation of its learning context.
Returns a subclass of LearningContext
Raises TypeError if the specified key isn't a type that has a learning
context.
Raises PluginError if there is some misconfiguration causing the context
implementation to not b | e installed.
"""
if isinstance(key, LearningContextKey):
context_type | = key.CANONICAL_NAMESPACE # e.g. 'lib'
elif isinstance(key, UsageKeyV2):
context_type = key.context_key.CANONICAL_NAMESPACE
elif isinstance(key, OpaqueKey):
# Maybe this is an older modulestore key etc.
raise TypeError("Opaque key {} does not have a learning context.".format(key))
else:
raise TypeError("key '{}' is not an opaque key. You probably forgot [KeyType].from_string(...)".format(key))
try:
return _learning_context_cache[context_type]
except KeyError:
# Load this learning context type.
params = get_xblock_app_config().get_learning_context_params()
_learning_context_cache[context_type] = LearningContextPluginManager.get_plugin(context_type)(**params)
return _learning_context_cache[context_type]
|
dejlek/pulsar | examples/philosophers/tests.py | Python | bsd-3-clause | 1,021 | 0 | import unittest
import asyncio
from pulsar import send
from pulsar.apps.test import test_timeout
from .manage import DiningPhilosophers
class TestPhylosophers(unittest.TestCase):
app_cfg = None
concurrency = 'thread'
@classmethod
@asyncio.coroutine
def setUpClass(cls) | :
app = DiningPhilosophers(name='plato',
concurrency=cls.concurrency)
cls.app_cfg = yield from send('arbiter', 'run', app)
@test_timeout(30)
@asyncio.coroutine
def test_info(self):
while True:
yield from asyncio.sleep(0.5)
info = yield from send('plato', 'info')
all = []
for data in info.get('workers', []):
p = data.get('philosopher')
| if p:
all.append(p)
if len(all) == 5:
break
@classmethod
def tearDownClass(cls):
if cls.app_cfg is not None:
return send('arbiter', 'kill_actor', cls.app_cfg.name)
|
KITPraktomatTeam/Praktomat | src/utilities/file_operations.py | Python | gpl-2.0 | 3,099 | 0.002259 | # -*- coding: utf-8 -*-
import os
import grp
import tempfile
from django.conf import settings
from utilities import encoding
import shutil
import zipfile
gid = None
if (settings.USEPRAKTOMATTESTER):
gid = grp.getgrnam('praktomat').gr_gid
def makedirs(path):
if os.path.exists(path):
return
else:
(head, tail) = os.path.split(path)
makedirs(head)
os.mkdir(path)
if (gid):
os.chown(path, -1, gid)
os.chmod(path, 0o770)
def create_file(path, content, override=True, binary=False):
""" """
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
makedirs(dirname)
else:
if os.path.exists(path):
if override: # delete file
os.remove(path)
else: # throw exception
raise Exception('File already exists')
with open(path, 'wb') as fd:
if binary:
fd.write(content)
else:
fd.write(encoding.get_utf8(encoding.get_unicode(content)))
if (gid):
# chown :praktomat <path>
os.chown(path, -1, gid)
# rwxrwx--- access for praktomattester:praktomat
os.chmod(path, 0o770)
def copy_file(from_path, to_path, to_is_directory=False, override=True):
""" """
if to_is_directory:
to_path = os.path.join(to_path, os.path.basename(from_path))
with open(from_path, "rb") as fd:
create_file(to_path, fd.read(), override=override, binary=True)
def create_tempfolder(path):
makedirs(path)
tempfile.tempdir = path
new_tmpdir = tempfile.mkdtemp()
if (gid):
os.chown(new_tmpdir, -1, gid)
os.chmod(new_tmpdir, 0o770)
return new_tmpdir
class InvalidZipFile(Exception):
pass
def unpack_zipfile_to(zipfilename, to_path, override_cb=None, file_cb=None):
"""
Extracts a zipfile to the given location, trying to safeguard against wrong paths
The override_cb is called for every file that ov | erwrites an existing file,
with the name of the file in the archive as the parameter.
The file_cb is called for every file, after extracting it.
"""
if not zipfile.is_zipfile(zipfilename):
raise InvalidZipFile("File %s is not a zipfile." % zipfilename)
zip = zipfile.ZipFile(zipfilename, 'r')
if zip.testzip():
raise InvalidZipFile("File %s is invalid." % zipfilename)
# zip.extractal | l would not protect against ..-paths,
# it would do so from python 2.7.4 on.
for finfo in zip.infolist():
dest = os.path.join(to_path, finfo.filename)
# This check is from http://stackoverflow.com/a/10077309/946226
if not os.path.realpath(os.path.abspath(dest)).startswith(to_path):
raise InvalidZipFile("File %s contains illegal path %s." % (zipfilename, finfo.filename))
if override_cb is not None and os.path.exists(dest):
override_cb(finfo.filename)
zip.extract(finfo, to_path)
if file_cb is not None and os.path.isfile(os.path.join(to_path, finfo.filename)):
file_cb(finfo.filename)
|
RuthAngus/LSST-max | code/soft/regions.py | Python | mit | 5,812 | 0.025292 | import numpy as np
import matplotlib.pyplot as plt
import time
def regions(seed=0, randspots, activityrate=1, cyclelength=1, cycleoverlap=0,
maxlat=70, minlat=0, tsim=1000, tstart=0, dir="."):
"""
inputs
activityrate - number of bipoles (1= solar)
cyclelength - length of cycle in years
cycleoverlap - cycleoverlap time in years
tsim - length of simulation in days
tstart - first day to start outputting bipoles
minlat - minimum latitude of spot emergence
maxlat - maximum latitude of spot emergence
randspots - set with /randspots. Use this for no cycle
This program simulates the solar cycle. It produces a list
of active regions with the following parameters:
nday = day of emergence
thpos= theta of positive pole (radians)
phpos= phi of positive pole (radians)
thneg= theta of negative pole ( | radians)
phneg= phi of negative pole (radians)
width= width of each pole (radians)
bmax = maximum flux density (Gauss)
According to Schrijver and Harvey (1994), the number of active regions
| emerging with areas in the range [A,A+dA] in a time dt is given by
n(A,t) dA dt = a(t) A^(-2) dA dt ,
where A is the "initial" area of a bipole in square degrees, and t is
the time in days; a(t) varies from 1.23 at cycle minimum to 10 at cycle
maximum.
The bipole area is the area within the 25-Gauss contour in the
"initial" state, i.e. time of maximum development of the active region.
The assumed peak flux density in the initial sate is 1100 G, and
width = 0.2*bsiz (see disp_region). The parameters written onto the
file are corrected for further diffusion and correspond to the time
when width = 4 deg, the smallest width that can be resolved with lmax=63.
In our simulation we use a lower value of a(t) to account for "correlated"
regions.
"""
nbin = 5 # number of area bins
delt = 0.5 # delta ln(A)
amax = 100. # orig. area of largest bipoles (deg^2)
dcon = exp(0.5*delt)-exp(-0.5*delt) # contant from integ. over bin
print('Creating regions with the following parameters:')
print('Activity rate: ', activityrate, ' x Solar rate.'
print('Cycle length: ', cyclelength, ' years.')
print('Cycle overlap: ', cycleoverlap, ' years.')
print('Max spot lat: ', maxlat, ' degrees')
print('Min spot lat: ', minlat, ' degrees')
print('Simulation time: ', tsim, ' days')
print('Simulation start:', tstart, ' days')
deviation = 5 # rmsd deviation from butterfly pattern
atm = np.zeros(200) + 10.*activityrate
ncycle = np.zeros(200) + cyclelength
nclen = np.zeros(200) + cyclelength + cycleoverlap
latrmsd = np.zeros(200) + deviation
# a(t) at cycle maximum (deg^2/day)
# cycle period (days)
# cycle duration (days)
ncycle, nclen = ncycle*365., nclen*365.
fact = np.exp(delt*np.ones(nbin)) # array of area reduction factors
ftot = sum(fact) # sum of reduction factors
bsiz = np.sqrt(amax/fact) # array of bipole separations (deg)
tau1 = 5. # first and last times (in days) for
tau2 = 15. # emergence of "correlated" regions
prob = 0.001 # total probability for "correlation"
nlon = 36 # number of longitude bins
nlat = 16 # number of latitude bins
nday1 = 0 # first day to be simulated
ndays = tsim # number of days to be simulated
dt = 1
def add_region(nday, ic, lo, lat, k, bsiz1, seed, phase):
w_org = .4 * bsiz1 # original width (degrees), at birth
width = 4. # final width (degrees), at death
bmax = 250. * (w_org / width)**2 # final peak flux density (G)
bsizr = np.pi * bsiz1 / 180. # pole separation in radians
width = np.pi * width / 180. # final width in radians
np.seed(seed)
rand_array = np.random.randn(100) # random number less than 1.6
x = rand_array[rand_array < 1.6][0]
y = rand_array[rand_array < 1.8][0]
np.seed(seed)
z = np.random.uniform(1)
if z > 1.4:
ang = .5*lat + 2. + 27.*x*y # tilt angle (degrees)
else:
if __name__ == "__main__":
# # Initialize random number generator:
# if seed == -1:
# seed = time.time()
# # Initialize time since last emergence of a large region, as function
# # of longitude, latitude and hemisphere:
# tau = range(nlon, nlat, 2) + tau2
# dlon = 360. / nlon
# dlat = maxlat / nlat
# ncnt = 0
# # Loop over time (in days):
# ncur = 0
# cycle_days = ncycle[0]
# start_day = 0
# for nday in range(ndays):
# # Compute index of most recently started cycle:
# ncur_test = nday / cycle_days
# ncur_now = nday / cycle_days
# ncur_prev = (nday - 1) / cycle_days
# if int(ncur_now) != int(ncur_prev):
# ncur += ncur
# cycle_days += ncycle(ncur)
# # Initialize rate of emergence for largest regions, and add 1 day
# # to time of last emergence:
# tau += 1
# rc0 = np.arange(nlon, nlat, 2)
# index = (tau > tau1) * (tau l<= tau2)
# if index[0] > -1:
# rc0[index] = prob / (tau2 - tau1)
# # Loop over current and previous cycle:
# for icycle in range(2):
# nc = ncur - icycle # index of cycle
# if ncur == 0:
# nc1 = 0
# start_day = nc*ncycle[0]
# else:
# nc1 = nc
# if ncur == 1:
# if icycle == 0:
# start_day = int(sum(ncycle[:nc-1]))
# if icycle == 1:
# start_day = 0
# else:
# start_day = int(sum(ncycle[:nc-1]))
# np.savetxt("{0}/regions.txt".format(dir))
|
tylertian/Openstack | openstack F/python-novaclient/novaclient/tests/v1_1/test_security_group_rules.py | Python | apache-2.0 | 2,485 | 0.002414 | from novaclient import exceptions
from novaclient.v1_1 import security_group_rules
from novaclient.tests import utils
from novaclient.tests.v1_1 import fakes
cs = fakes.FakeClient()
class SecurityGroupRulesTest(utils.TestCase):
    """Exercise security-group-rule CRUD calls against the fake client.

    Each test issues a call through the module-level fake client ``cs`` and
    then verifies the HTTP method, path, and (where relevant) request body
    that the fake recorded.
    """

    def test_delete_security_group_rule(self):
        cs.security_group_rules.delete(1)
        cs.assert_called('DELETE', '/os-security-group-rules/1')

    def test_create_security_group_rule(self):
        sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16")

        body = {
            "security_group_rule": {
                "ip_protocol": "tcp",
                "from_port": 1,
                "to_port": 65535,
                "cidr": "10.0.0.0/16",
                "group_id": None,
                "parent_group_id": 1,
            }
        }

        cs.assert_called('POST', '/os-security-group-rules', body)
        self.assertTrue(isinstance(sg, security_group_rules.SecurityGroupRule))

    def test_create_security_group_group_rule(self):
        # Same as above, but the rule grants access to another security
        # group (id 101) rather than being a CIDR-only rule.
        sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16",
                                            101)

        body = {
            "security_group_rule": {
                "ip_protocol": "tcp",
                "from_port": 1,
                "to_port": 65535,
                "cidr": "10.0.0.0/16",
                "group_id": 101,
                "parent_group_id": 1,
            }
        }

        cs.assert_called('POST', '/os-security-group-rules', body)
        self.assertTrue(isinstance(sg, security_group_rules.SecurityGroupRule))

    def test_invalid_parameters_create(self):
        # Each bad argument (protocol, from_port, to_port) must be rejected
        # client-side with a CommandError before any request is issued.
        self.assertRaises(exceptions.CommandError,
                          cs.security_group_rules.create,
                          1, "invalid_ip_protocol", 1, 65535, "10.0.0.0/16", 101)
        self.assertRaises(exceptions.CommandError,
                          cs.security_group_rules.create,
                          1, "tcp", "invalid_from_port", 65535, "10.0.0.0/16", 101)
        self.assertRaises(exceptions.CommandError,
                          cs.security_group_rules.create,
                          1, "tcp", 1, "invalid_to_port", "10.0.0.0/16", 101)

    def test_security_group_rule_str(self):
        sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16")
        self.assertEquals('1', str(sg))

    def test_security_group_rule_del(self):
        sg = cs.security_group_rules.create(1, "tcp", 1, 65535, "10.0.0.0/16")
        sg.delete()
        cs.assert_called('DELETE', '/os-security-group-rules/1')
|
erdc-cm/air-water-vv | 3d/dambreak_Ubbink/dambreak_Ubbink_medium/ls_consrv_n.py | Python | mit | 982 | 0.01222 | from proteus import *
from dambreak_Ubbink_medium import *
from ls_consrv_p import *

# Numerics for the 'mcorr' (mass-correction) model: no time integration,
# Newton nonlinear solves, and a Krylov or direct linear solver depending
# on the flags imported from the problem module.
timeIntegrator = ForwardIntegrator
timeIntegration = NoIntegration

femSpaces = {0: basis}

subgridError = None
massLumping = False
numericalFluxType = DoNothing
conservativeFlux = None
shockCapturing = None

fullNewtonFlag = True
multilevelNonlinearSolver = Newton
levelNonlinearSolver = Newton
nonlinearSmoother = None
linearSmoother = None

matrix = SparseMatrix

# Linear solver selection: legacy PETSc wrapper, petsc4py KSP, or a direct
# (SuperLU) solve; the useSuperlu flag takes precedence when set.
if useOldPETSc:
    multilevelLinearSolver = PETSc
    levelLinearSolver = PETSc
else:
    multilevelLinearSolver = KSP_petsc4py
    levelLinearSolver = KSP_petsc4py

if useSuperlu:
    multilevelLinearSolver = LU
    levelLinearSolver = LU

linear_solver_options_prefix = 'mcorr_'
linearSolverConvergenceTest = 'r-true'

# Tolerances: absolute residual targets come from the problem module.
tolFac = 0.0
linTolFac = 0.01
l_atol_res = 0.01 * mcorr_nl_atol_res
nl_atol_res = mcorr_nl_atol_res
useEisenstatWalker = False
maxNonlinearIts = 50
maxLineSearches = 0
chaserhkj/musicbox | setup.py | Python | mit | 1,899 | 0.003686 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-08-24 22:08:33
# @Last Modified by: omi
# @Last Modified time: 2015-03-30 23:36:21
'''
__ ___________________________________________
| \ ||______ | |______|_____||______|______
| \_||______ | |______| |______||______
________ __________________________ _____ _ _
| | || ||______ | | |_____]| | \___/
| | ||_____|______|__|__|_____ |_____]|_____|_/ \_
+ ------------------------------------------ +
| NetEase-MusicBox 320kbps |
+ ------------------------------------------ +
| |
| ++++++++++++++++++++++++++++++++++++++ |
| ++++++++++++++++++++++++++++++++++++++ |
| ++++++++++++++++++++++++++++++++++++++ |
| ++++++++++++++++++++++++++++++++++++++ |
| ++++++++++++++++++++++++++++++++++++++ |
| |
| A sexy cli musicbox based on Python |
| Music resource from music.163.com |
| |
| Built with love to music by omi |
| |
+ ------------------------------------------ +
'''
from setuptools import setup, find_packages

# Package metadata for the NetEase-MusicBox command line player.
# The console_scripts entry point exposes NEMbox.start as `musicbox`.
setup(
    name='NetEase-MusicBox',
    version='0.1.9.6',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'requests',
        'BeautifulSoup4',
        'pycrypto',
    ],
    entry_points={
        'console_scripts': [
            'musicbox = NEMbox:start'
        ],
    },
    author='omi',
    author_email='4399.omi@gmail.com',
    url='https://github.com/darknessomi/musicbox',
    description='A sexy command line interface musicbox',
    keywords=['music', 'netease', 'cli', 'player'],
    zip_safe=False,
)
|
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/nltk/corpus/reader/semcor.py | Python | mit | 11,106 | 0.005312 | # Natural Language Toolkit: SemCor Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Nathan Schneider <nschneid@cs.cmu.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Corpus reader for the SemCor Corpus.
"""
from __future__ import absolute_import, unicode_literals
__docformat__ = 'epytext en'
from nltk.corpus.reader.api import *
from nltk.corpus.reader.xmldocs import XMLCorpusReader, XMLCorpusView
from nltk.tree import Tree
class SemcorCorpusReader(XMLCorpusReader):
"""
Corpus reader for the SemCor Corpus.
    For access to the complete XML data structure, use the ``xml()``
method. For access to simple word lists and tagged word lists, use
``words()``, ``sents()``, ``tagged_words()``, and ``tagged_sents()``.
"""
    def __init__(self, root, fileids, wordnet, lazy=True):
        """Initialize the reader.

        :param root: corpus root directory
        :param fileids: file identifiers of the corpus documents
        :param wordnet: WordNet corpus reader used to resolve sense keys
        :param lazy: if True, build corpus views lazily on access
        """
        XMLCorpusReader.__init__(self, root, fileids)
        self._lazy = lazy
        self._wordnet = wordnet
    def words(self, fileids=None):
        """
        :return: the given file(s) as a list of words and punctuation symbols.
        :rtype: list(str)
        """
        # unit='word', no sentence bracketing, no POS tags, no semantic tags
        return self._items(fileids, 'word', False, False, False)
    def chunks(self, fileids=None):
        """
        :return: the given file(s) as a list of chunks,
            each of which is a list of words and punctuation symbols
            that form a unit.
        :rtype: list(list(str))
        """
        # unit='chunk', no sentence bracketing, no POS tags, no semantic tags
        return self._items(fileids, 'chunk', False, False, False)
    def tagged_chunks(self, fileids=None, tag=('pos' or 'sem' or 'both')):
        """
        :return: the given file(s) as a list of tagged chunks, represented
            in tree form.
        :rtype: list(Tree)

        :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
            to indicate the kind of tags to include. Semantic tags consist of
            WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
            without a specific entry in WordNet. (Named entities of type 'other'
            have no lemma. Other chunks not in WordNet have no semantic tag.
            Punctuation tokens have `None` for their part of speech tag.)
        """
        # NOTE(review): the default expression ('pos' or 'sem' or 'both')
        # always evaluates to 'pos' -- confirm that default is intended.
        return self._items(fileids, 'chunk', False, tag!='sem', tag!='pos')
    def sents(self, fileids=None):
        """
        :return: the given file(s) as a list of sentences, each encoded
            as a list of word strings.
        :rtype: list(list(str))
        """
        # unit='word' with sentence bracketing; no POS or semantic tags
        return self._items(fileids, 'word', True, False, False)
    def chunk_sents(self, fileids=None):
        """
        :return: the given file(s) as a list of sentences, each encoded
            as a list of chunks.
        :rtype: list(list(list(str)))
        """
        # unit='chunk' with sentence bracketing; no POS or semantic tags
        return self._items(fileids, 'chunk', True, False, False)
    def tagged_sents(self, fileids=None, tag=('pos' or 'sem' or 'both')):
        """
        :return: the given file(s) as a list of sentences. Each sentence
            is represented as a list of tagged chunks (in tree form).
        :rtype: list(list(Tree))

        :param tag: `'pos'` (part of speech), `'sem'` (semantic), or `'both'`
            to indicate the kind of tags to include. Semantic tags consist of
            WordNet lemma IDs, plus an `'NE'` node if the chunk is a named entity
            without a specific entry in WordNet. (Named entities of type 'other'
            have no lemma. Other chunks not in WordNet have no semantic tag.
            Punctuation tokens have `None` for their part of speech tag.)
        """
        # Same tag handling as tagged_chunks(), but with sentence bracketing.
        return self._items(fileids, 'chunk', True, tag!='sem', tag!='pos')
    def _items(self, fileids, unit, bracket_sent, pos_tag, sem_tag):
        """Build a (possibly lazy) view per file and concatenate the results.

        `_` below is a local alias for the per-file view factory.
        """
        if unit=='word' and not bracket_sent:
            # the result of the SemcorWordView may be a multiword unit, so the
            # LazyConcatenation will make sure the sentence is flattened
            _ = lambda *args: LazyConcatenation((SemcorWordView if self._lazy else self._words)(*args))
        else:
            _ = SemcorWordView if self._lazy else self._words
        return concat([_(fileid, unit, bracket_sent, pos_tag, sem_tag, self._wordnet)
                       for fileid in self.abspaths(fileids)])
    def _words(self, fileid, unit, bracket_sent, pos_tag, sem_tag):
        """
        Helper used to implement the view methods -- returns a list of
        tokens, (segmented) words, chunks, or sentences. The tokens
        and chunks may optionally be tagged (with POS and sense
        information).

        :param fileid: The name of the underlying file.
        :param unit: One of `'token'`, `'word'`, or `'chunk'`.
        :param bracket_sent: If true, include sentence bracketing.
        :param pos_tag: Whether to include part-of-speech tags.
        :param sem_tag: Whether to include semantic tags, namely WordNet lemma
            and OOV named entity status.
        """
        assert unit in ('token', 'word', 'chunk')
        result = []

        xmldoc = ElementTree.parse(fileid).getroot()
        # SemCor sentences are <s> elements; each word is converted by _word().
        for xmlsent in xmldoc.findall('.//s'):
            sent = []
            for xmlword in _all_xmlwords_in(xmlsent):
                itm = SemcorCorpusReader._word(xmlword, unit, pos_tag, sem_tag, self._wordnet)
                if unit=='word':
                    # 'word' items are lists (multiword units are split up)
                    sent.extend(itm)
                else:
                    sent.append(itm)

            if bracket_sent:
                # wrap each sentence together with its sentence number
                result.append(SemcorSentence(xmlsent.attrib['snum'], sent))
            else:
                result.extend(sent)

        assert None not in result
        return result
@staticmethod
def _word(xmlword, unit, pos_tag, sem_tag, wordnet):
tkn = xmlword.text
if not tkn:
tkn = "" # fixes issue 337?
lemma = xmlword.get('lemma', tkn) # lemma or NE class
lexsn = xmlword.get('lexsn') # lex_sense (locator for the lemma's sense)
if lexsn is not None:
sense_key = lemma + '%' + lexsn
wnpos = ('n','v','a','r','s')[int(lexsn.split(':')[0])-1] # see http://wordnet.princeton.edu/man/senseidx.5WN.html
else:
sense_key = wnpos = None
redef = xmlword.get('rdf', tkn) # redefinition--this indicates the lookup string
# does not exactly match the enclosed string, e.g. due to typographical adjustments
# or discontinuity of a multiword expression. If a redefinition has occurred,
# the "rdf" attribute holds its inflected form and "lemma" holds its lemma.
# For NEs, "rdf", "lemma", and "pn" all hold the same value (the NE class).
sensenum = xmlword.get('wnsn') # WordNet sense number
isOOVEntity = 'pn' in xmlword.keys() # a "personal name" (NE) not in WordNet
pos = xmlword.get('pos') # part of speech for the whole chunk (None for punctuation)
if unit=='token':
if not pos_tag and not sem_tag:
itm = tkn
else:
itm = (tkn,) + ((pos,) if pos_tag else ()) + ((lemma, wnpos, sensenum, isOOVEntity) if sem_tag else ())
return itm
else:
ww = tkn.split('_') # TODO: case where punctuation intervenes in MWE
if unit=='word':
return ww
else:
if sensenum is not None:
try:
sense = wordnet.lemma_from_key(sense_key) # Lemma object
except Exception:
# cannot retrieve the wordnet.Lemma object. possible reasons:
# (a) the wordnet corpus is not downloaded;
# (b) a nonexistant sense is annotated: e.g., such.s.00 triggers:
# nltk.corpus.reader.wordnet.WordNetError: No synset found for key u'such%5:00:01:specified:00'
# solution: just use the lemma name as a string
try:
sense = '%s.%s.%02d' % (lem |
kramwens/order_bot | venv/lib/python2.7/site-packages/twilio/rest/resources/trunking/credential_lists.py | Python | mit | 1,709 | 0 | from .. import NextGenInstanceResource, NextGenListResource
class CredentialList(NextGenInstanceResource):
    """
    A Credential List Resource.

    See the `SIP Trunking API reference
    <https://www.twilio.com/docs/sip-trunking/rest/credential-lists>_`
    for more information.

    .. attribute:: sid

        The unique ID for this Credential List.

    .. attribute:: trunk_sid

        The unique ID of the Trunk that owns this Credential List.
    """

    def delete(self):
        """
        Disassociates a Credential List from the trunk.
        """
        # `self.parent` and `self.name` are provided by the list-resource
        # machinery; deletion is delegated to the owning list resource.
        return self.parent.delete_instance(self.name)
class CredentialLists(NextGenListResource):
    """ A list of Credential List resources """

    name = "CredentialLists"
    instance = CredentialList
    key = "credential_lists"

    def list(self, **kwargs):
        """
        Retrieve the list of Credential List resources for a given trunk sid.

        :param Page: The subset of results that needs to be fetched
        :param PageSize: The size of the Page that needs to be fetched
        """
        return super(CredentialLists, self).list(**kwargs)

    def create(self, credential_list_sid):
        """
        Associate a Credential List with a Trunk.

        :param credential_list_sid: A human readable Credential list sid.
        """
        data = {
            'credential_list_sid': credential_list_sid
        }
        return self.create_instance(data)

    def delete(self, credential_list_sid):
        """
        Disassociates a Credential List from the Trunk.

        :param credential_list_sid: A human readable Credential list sid.
        """
        return self.delete_instance(credential_list_sid)
|
ruijie/quantum | quantum/tests/unit/test_linux_dhcp.py | Python | apache-2.0 | 23,666 | 0.000085 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import tempfile
import unittest2 as unittest
import mock
from quantum.agent.linux import dhcp
from quantum.agent.common import config
from quantum.openstack.common import cfg
from quantum.openstack.common import jsonutils
class FakeIPAllocation:
    """Stand-in for a fixed-IP allocation record holding one address."""

    def __init__(self, address):
        # Mirror the single attribute the DHCP code reads from allocations.
        self.ip_address = address
class FakePort1:
    # Fake port: admin-enabled, single IPv4 fixed address.
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    admin_state_up = True
    fixed_ips = [FakeIPAllocation('192.168.0.2')]
    mac_address = '00:00:80:aa:bb:cc'
class FakePort2:
    # Fake port: admin-disabled, single IPv6 fixed address.
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    admin_state_up = False
    fixed_ips = [FakeIPAllocation('fdca:3ba5:a17a:4ba3::2')]
    mac_address = '00:00:f3:aa:bb:cc'
class FakePort3:
    # Fake port: admin-enabled, dual-stack (one IPv4 and one IPv6 address).
    id = '44444444-4444-4444-4444-444444444444'
    admin_state_up = True
    fixed_ips = [FakeIPAllocation('192.168.0.3'),
                 FakeIPAllocation('fdca:3ba5:a17a:4ba3::3')]
    mac_address = '00:00:0f:aa:bb:cc'
class FakeV4HostRoute:
    # Fake IPv4 host route: destination CIDR and next-hop address.
    destination = '20.0.0.1/24'
    nexthop = '20.0.0.1'
class FakeV6HostRoute:
    # Fake IPv6 host route: destination CIDR and next-hop address.
    # NOTE(review): 'gdca:...' is not valid hex for an IPv6 address --
    # presumably deliberate dummy test data; confirm.
    destination = 'gdca:3ba5:a17a:4ba3::/64'
    nexthop = 'gdca:3ba5:a17a:4ba3::1'
class FakeV4Subnet:
    # DHCP-enabled IPv4 subnet with a gateway, one host route, one DNS server.
    id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
    ip_version = 4
    cidr = '192.168.0.0/24'
    gateway_ip = '192.168.0.1'
    enable_dhcp = True
    host_routes = [FakeV4HostRoute]
    dns_nameservers = ['8.8.8.8']
class FakeV6Subnet:
    # DHCP-enabled IPv6 subnet with a gateway, one host route, one DNS server.
    id = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
    ip_version = 6
    cidr = 'fdca:3ba5:a17a:4ba3::/64'
    gateway_ip = 'fdca:3ba5:a17a:4ba3::1'
    enable_dhcp = True
    host_routes = [FakeV6HostRoute]
    dns_nameservers = ['gdca:3ba5:a17a:4ba3::1']
class FakeV4SubnetNoDHCP:
    # IPv4 subnet with DHCP disabled (no routes or DNS servers).
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = '192.168.1.1'
    enable_dhcp = False
    host_routes = []
    dns_nameservers = []
class FakeV4SubnetNoGateway:
    # DHCP-enabled IPv4 subnet without a gateway address.
    id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
    ip_version = 4
    cidr = '192.168.1.0/24'
    gateway_ip = None
    enable_dhcp = True
    host_routes = []
    dns_nameservers = []
class FakeV4Network:
    # Fake network: one IPv4 subnet, one port.
    id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    subnets = [FakeV4Subnet()]
    ports = [FakePort1()]
class FakeV6Network:
    # Fake network: one IPv6 subnet, one port.
    id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
    subnets = [FakeV6Subnet()]
    ports = [FakePort2()]
class FakeDualNetwork:
    # Fake dual-stack network: IPv4 + IPv6 subnets, three ports.
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV6Subnet()]
    ports = [FakePort1(), FakePort2(), FakePort3()]
class FakeDualNetworkSingleDHCP:
    # Fake network with two IPv4 subnets, only one of which has DHCP enabled.
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4Subnet(), FakeV4SubnetNoDHCP()]
    ports = [FakePort1(), FakePort2(), FakePort3()]
class FakeV4NoGatewayNetwork:
    # Fake network whose only subnet has no gateway.
    id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
    subnets = [FakeV4SubnetNoGateway()]
    ports = [FakePort1()]
class TestDhcpBase(unittest.TestCase):
def test_base_abc_error(self):
self.assertRaises(TypeError, dhcp.DhcpBase, None)
def test_replace_file(self):
# make file to replace
with mock.patch('tempfile.NamedTemporaryFile') as ntf:
ntf.return_value.name = '/baz'
with mock.patch('os.chmod') as chmod:
with mock.patch('os.rename') as rename:
dhcp.replace_file('/foo', 'bar')
expected = [mock.call('w+', dir='/', delete=False),
mock.call().write('bar'),
mock.call().close()] |
ntf.assert_has_calls(expected)
chmod.assert_called_once_with('/baz', 0644)
rename.assert_called_once_with('/baz', '/foo')
def test_resta | rt(self):
class SubClass(dhcp.DhcpBase):
def __init__(self):
dhcp.DhcpBase.__init__(self, None, None, None)
self.called = []
def enable(self):
self.called.append('enable')
def disable(self, retain_port=False):
self.called.append('disable %s' % retain_port)
def reload_allocations(self):
pass
@property
def active(self):
return True
c = SubClass()
c.restart()
self.assertEquals(c.called, ['disable True', 'enable'])
class LocalChild(dhcp.DhcpLocalProcess):
    """Concrete DhcpLocalProcess that records lifecycle calls for tests."""
    PORTS = {4: [4], 6: [6]}

    def __init__(self, *args, **kwargs):
        super(LocalChild, self).__init__(*args, **kwargs)
        self.called = []  # records which lifecycle hooks were invoked

    def reload_allocations(self):
        self.called.append('reload')

    def restart(self):
        self.called.append('restart')

    def spawn_process(self):
        self.called.append('spawn')
class TestBase(unittest.TestCase):
    """Common fixture: loads the test config and mocks file/command helpers."""

    def setUp(self):
        root = os.path.dirname(os.path.dirname(__file__))
        args = ['--config-file',
                os.path.join(root, 'etc', 'quantum.conf.test')]
        self.conf = config.setup_conf()
        self.conf.register_opts(dhcp.OPTS)
        self.conf.register_opt(cfg.StrOpt('dhcp_lease_relay_socket',
                               default='$state_path/dhcp/lease_relay'))
        self.conf(args=args)
        self.conf.set_override('state_path', '')
        self.conf.use_namespaces = True

        # Patch file replacement and command execution so no test touches
        # the real filesystem or spawns processes.
        self.replace_p = mock.patch('quantum.agent.linux.dhcp.replace_file')
        self.execute_p = mock.patch('quantum.agent.linux.utils.execute')
        self.safe = self.replace_p.start()
        self.execute = self.execute_p.start()

    def tearDown(self):
        # Stop patches so mocks do not leak between test cases.
        self.execute_p.stop()
        self.replace_p.stop()
class TestDhcpLocalProcess(TestBase):
def test_active(self):
dummy_cmd_line = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
self.execute.return_value = (dummy_cmd_line, '')
with mock.patch.object(LocalChild, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
lp = LocalChild(self.conf, FakeV4Network())
self.assertTrue(lp.active)
self.execute.assert_called_once_with(['cat', '/proc/4/cmdline'],
'sudo')
def test_active_cmd_mismatch(self):
dummy_cmd_line = 'bbbbbbbb-bbbb-bbbb-aaaa-aaaaaaaaaaaa'
self.execute.return_value = (dummy_cmd_line, '')
with mock.patch.object(LocalChild, 'pid') as pid:
pid.__get__ = mock.Mock(return_value=4)
lp = LocalChild(self.conf, FakeV4Network())
self.assertFalse(lp.active)
self.execute.assert_called_once_with(['cat', '/proc/4/cmdline'],
'sudo')
def test_get_conf_file_name(self):
tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = False
with mock.patch('os.makedirs') as makedirs:
lp = LocalChild(self.conf, FakeV4Network())
self.assertEqual(lp.get_conf_file_name('dev'), tpl)
self.assertFalse(makedirs.called)
def test_get_conf_file_name_ensure_dir(self):
tpl = '/dhcp/aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa/dev'
with mock.patch('os.path.isdir') as isdir:
isdir.return_value = False
with mock.patch('os.makedirs') as makedirs:
lp = LocalChild(self.conf, FakeV4Network())
self.assertEqual(lp.get_conf_file_name('dev', True), tpl)
self.assertTrue(makedirs |
iamhuy/rumour-veracity-verification | src/data/make_interim.py | Python | mit | 7,273 | 0.0044 | # -*- coding: utf-8 -*-
import os
import logging
from dotenv import find_dotenv, load_dotenv
from constants import *
import json
from utils import json_from_file, merge_json
import shutil
from settings import *
def prepare_train_data():
    """ Runs data processing scripts to turn training raw data from (../raw)
        into interim data to be analyzed (saved in ../interim).

        For every event, each conversation thread is merged into a single
        JSON document (source tweet + stance-labelled replies + structure +
        thread-level veracity/stance labels) and written as <index>.json.
    """
    logger = logging.getLogger(__name__)
    logger.info('Making interim train data set from raw data')

    # Absolute paths of the raw input/output folders and the interim target.
    raw_input_folder_path = os.path.join(DATA_RAW_ROOT, DATASET_NAME, RAW_INPUT_FOLDER)
    raw_output_folder_path = os.path.join(DATA_RAW_ROOT, DATASET_NAME, RAW_OUTPUT_FOLDER)
    interim_folder_path = os.path.join(DATA_INTERIM_ROOT, DATASET_NAME)

    # Veracity and stance labels are split across two files (test and dev);
    # merge each pair into a single lookup keyed by tweet id.
    veracity_labels = merge_json(
        json_from_file(os.path.join(raw_output_folder_path, VERACITY_LABEL_FILE[0])),
        json_from_file(os.path.join(raw_output_folder_path, VERACITY_LABEL_FILE[1])))
    stance_labels = merge_json(
        json_from_file(os.path.join(raw_output_folder_path, STANCE_LABEL_FILE[0])),
        json_from_file(os.path.join(raw_output_folder_path, STANCE_LABEL_FILE[1])))

    # Rebuild the interim tree from scratch so stale files never survive.
    if os.path.exists(interim_folder_path):
        shutil.rmtree(interim_folder_path)
    os.makedirs(interim_folder_path)

    for event_name in DATASET_EVENTS:
        interim_event_folder_path = os.path.join(interim_folder_path, event_name)
        os.makedirs(interim_event_folder_path)

        event_folder_path = os.path.join(raw_input_folder_path, event_name)
        # Each thread lives in a sub-directory named after its source tweet id.
        tweet_ids = [name for name in os.listdir(event_folder_path)
                     if os.path.isdir(os.path.join(event_folder_path, name))]

        for index, tweet_id in enumerate(tweet_ids):
            source_tweet_folder_path = os.path.join(event_folder_path, tweet_id)

            # Read the source tweet of the conversation thread.
            source_tweet_path = os.path.join(
                source_tweet_folder_path, 'source-tweet', tweet_id + '.json')
            with open(source_tweet_path) as source_tweet_file:
                source_tweet = json.load(source_tweet_file)

            # Read the replies, annotating each with its stance label.
            source_tweet_replies = []
            replies_folder_path = os.path.join(source_tweet_folder_path, 'replies')
            reply_names = [name for name in os.listdir(replies_folder_path)
                           if os.path.isfile(os.path.join(replies_folder_path, name))]
            for reply_name in reply_names:
                with open(os.path.join(replies_folder_path, reply_name)) as reply_file:
                    reply = json.load(reply_file)
                reply['stance'] = stance_labels[reply['id_str']]
                source_tweet_replies.append(reply)
            source_tweet['replies'] = source_tweet_replies

            # Attach the conversation structure and thread-level labels.
            with open(os.path.join(source_tweet_folder_path, 'structure.json')) as structure_file:
                source_tweet['structure'] = json.load(structure_file)
            source_tweet['veracity'] = veracity_labels[source_tweet['id_str']]
            source_tweet['stance'] = stance_labels[source_tweet['id_str']]

            # Write the merged thread to the interim folder as <index>.json.
            interim_tweet_path = os.path.join(interim_event_folder_path,
                                              str(index) + '.json')
            with open(interim_tweet_path, 'w') as interim_tweet_file:
                interim_tweet_file.write(json.dumps(source_tweet, indent=4))
def prepare_test_data():
    """ Runs data processing scripts to turn testing raw data from (../raw)
        into interim data to be analyzed (saved in ../interim).

        Same merge as prepare_train_data(), but the test set has a single
        label file per task and no per-event grouping.
    """
    logger = logging.getLogger(__name__)
    logger.info('Making interim test data set from raw data')

    # For the test set, input and output both live under the test-set root.
    raw_input_folder_path = os.path.join(DATA_RAW_ROOT, TESTSET_NAME)
    raw_output_folder_path = os.path.join(DATA_RAW_ROOT, TESTSET_NAME)
    interim_folder_path = os.path.join(DATA_INTERIM_ROOT, TESTSET_NAME)

    # Single veracity / stance label file each, keyed by tweet id.
    veracity_labels = json_from_file(os.path.join(raw_output_folder_path, VERACITY_LABEL_TEST_FILE[0]))
    stance_labels = json_from_file(os.path.join(raw_output_folder_path, STANCE_LABEL_TEST_FILE[0]))

    # Rebuild the interim tree from scratch so stale files never survive.
    if os.path.exists(interim_folder_path):
        shutil.rmtree(interim_folder_path)
    os.makedirs(interim_folder_path)

    tweet_ids = [name for name in os.listdir(raw_input_folder_path)
                 if os.path.isdir(os.path.join(raw_input_folder_path, name))]

    for index, tweet_id in enumerate(tweet_ids):
        source_tweet_folder_path = os.path.join(raw_input_folder_path, tweet_id)

        # Read the source tweet of the conversation thread.
        source_tweet_path = os.path.join(
            source_tweet_folder_path, 'source-tweet', tweet_id + '.json')
        with open(source_tweet_path) as source_tweet_file:
            source_tweet = json.load(source_tweet_file)

        # Read the replies, annotating each with its stance label.
        source_tweet_replies = []
        replies_folder_path = os.path.join(source_tweet_folder_path, 'replies')
        reply_names = [name for name in os.listdir(replies_folder_path)
                       if os.path.isfile(os.path.join(replies_folder_path, name))]
        for reply_name in reply_names:
            with open(os.path.join(replies_folder_path, reply_name)) as reply_file:
                reply = json.load(reply_file)
            reply['stance'] = stance_labels[reply['id_str']]
            source_tweet_replies.append(reply)
        source_tweet['replies'] = source_tweet_replies

        # Attach the conversation structure and thread-level labels.
        with open(os.path.join(source_tweet_folder_path, 'structure.json')) as structure_file:
            source_tweet['structure'] = json.load(structure_file)
        source_tweet['veracity'] = veracity_labels[source_tweet['id_str']]
        source_tweet['stance'] = stance_labels[source_tweet['id_str']]

        # Write the merged thread to the interim folder as <index>.json.
        interim_tweet_path = os.path.join(interim_folder_path,
                                          str(index) + '.json')
        with open(interim_tweet_path, 'w') as interim_tweet_file:
            interim_tweet_file.write(json.dumps(source_tweet, indent=4))
if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    # Build the interim datasets for both the training and test splits.
    prepare_train_data()
    prepare_test_data()
smalls12/django_helpcenter | helpcenter/api/tests/test_serializers.py | Python | mit | 6,657 | 0 | import json
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from helpcenter import models
from helpcenter.api import serializers
from helpcenter.api.testing_utils import full_url
from helpcenter.testing_utils import create_article, create_category
class TestArticleSerializer(TestCase):
    """ Test cases for the Article serializer """

    def setUp(self):
        """ Create request factory for hyperlinked serializer """
        factory = APIRequestFactory()
        self.request = factory.get('helpcenter:helpcenter-api:article-list')

    def test_deserialize(self):
        """ Test deserializing an Article.

        Passing valid data to the serializer should allow the serializer
        to construct a new Article from the data.
        """
        data = {
            'title': 'Test Article',
            'body': '<p>Rich <strong>text</strong></p>',
            'category_id': None,
        }
        serializer = serializers.ArticleSerializer(data=data)

        self.assertTrue(serializer.is_valid())

        article = serializer.save()

        self.assertEqual(data['title'], article.title)
        self.assertEqual(data['body'], article.body)

    def test_serialize(self):
        """ Test serializing an article """
        article = create_article()
        serializer = serializers.ArticleSerializer(
            article, context={'request': self.request})

        # NOTE(review): URL names mix two namespaces here
        # ('help:api:...' vs 'helpcenter:helpcenter-api:...') -- confirm
        # both are registered aliases in the project's URL conf.
        expected_dict = {
            'body': article.body,
            'category': article.category,
            'category_id': None,
            'url': full_url('help:api:article-detail',
                            kwargs={'pk': article.pk}),
            'id': article.id,
            'time_published': article.time_published.isoformat(),
            'title': article.title,
        }
        expected = json.dumps(expected_dict)

        self.assertJSONEqual(expected, serializer.data)

    def test_serialize_with_category(self):
        """ Test serializing an article with a category.

        If an article has a category, that category's url should be
        contained in the serializer.
        """
        category = create_category()
        article = create_article(category=category)
        serializer = serializers.ArticleSerializer(
            article, context={'request': self.request})

        expected_dict = {
            'body': article.body,
            'category': full_url(
                'helpcenter:helpcenter-api:category-detail',
                kwargs={'pk': category.pk}),
            'category_id': article.category.id,
            'id': article.id,
            'time_published': article.time_published.isoformat(),
            'title': article.title,
            'url': full_url('help:api:article-detail',
                            kwargs={'pk': article.pk}),
        }
        expected = json.dumps(expected_dict)

        self.assertJSONEqual(expected, serializer.data)

    def test_update(self):
        """ Test updating an existing Article.

        If data is passed to an already existing Article, it should
        update the existing Article.
        """
        article = create_article()
        data = {
            'title': 'New Awesome Title',
        }
        serializer = serializers.ArticleSerializer(
            article, data=data, partial=True)

        self.assertTrue(serializer.is_valid())

        updated = serializer.save()

        self.assertEqual(1, models.Article.objects.count())
        self.assertEqual(data['title'], updated.title)
class TestCategorySerializer(TestCase):
    """ Test cases for the Category serializer """

    def setUp(self):
        """ Create request factory for hyperlinked serializer """
        factory = APIRequestFactory()
        self.request = factory.get('helpcenter:helpcenter-api:category-list')

    def test_deserialize(self):
        """ Test deserializing data into a Category instance.

        Passing valid data to the serializer should allow the serializer
        to construct a new Category instance.
        """
        data = {
            'title': 'Test Category',
            'parent_id': None,
        }
        serializer = serializers.CategorySerializer(data=data)

        self.assertTrue(serializer.is_valid())

        category = serializer.save()

        self.assertEqual(data['title'], category.title)
        self.assertEqual(data['parent_id'], category.parent)

    def test_deserialize_with_parent(self):
        """ Test deserializing a Category with a parent Category.

        The serializer should be able to create a Category with a
        parent if the parent's id is given.
        """
        parent = create_category(title='Parent Category')
        data = {
            'title': 'Test Category',
            'parent_id': parent.id,
        }
        serializer = serializers.CategorySerializer(data=data)

        self.assertTrue(serializer.is_valid(), msg=serializer.errors)

        category = serializer.save()

        self.assertEqual(parent, category.parent)

    def test_serialize(self):
        """ Test serializing a Category instance.

        Serializing a Category instance should return a JSON
        representation of the instance.
        """
        parent = create_category(title='Parent Category')
        category = create_category(parent=parent)
        serializer = serializers.CategorySerializer(
            category, context={'request': self.request})

        expected_dict = {
            'id': category.id,
            'parent': full_url(
                'helpcenter:helpcenter-api:category-detail',
                kwargs={'pk': parent.pk}),
            'parent_id': parent.id,
            'title': category.title,
            'url': full_url('help:api:category-detail',
                            kwargs={'pk': category.pk}),
        }
        expected = json.dumps(expected_dict)

        self.assertJSONEqual(
            expected,
            serializer.data,
            msg='\nExpected: {}\n  Actual: {}'.format(
                expected, serializer.data))

    def test_update(self):
        """ Test updating an existing Category.

        If data is passed to an existing Category, it should update the
        existing instance's data.
        """
        category = create_category()
        data = {
            'title': 'Better Title'
        }
        serializer = serializers.CategorySerializer(
            category, data=data, partial=True)

        self.assertTrue(serializer.is_valid())

        updated = serializer.save()

        self.assertEqual(1, models.Category.objects.count())
        self.assertEqual(data['title'], updated.title)
|
gavinfish/leetcode-share | python/201 Bitwise AND of Numbers Range.py | Python | mit | 541 | 0.005545 | '''
Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.
For example, given the range [5, 7], you should return 4.
'''
class Solution(object):
    def rangeBitwiseAnd(self, m, n):
        """Return the bitwise AND of all integers in the inclusive range [m, n].

        :type m: int
        :type n: int
        :rtype: int
        """
        # Clearing the lowest set bit of n (n &= n - 1) walks n down toward
        # the common binary prefix of m and n. Every bit below that prefix
        # takes both values 0 and 1 somewhere in the range, so it must be 0
        # in the AND of the whole range.
        while n > m:
            n &= n - 1
        return n


if __name__ == "__main__":
    assert Solution().rangeBitwiseAnd(5, 7) == 4
    assert Solution().rangeBitwiseAnd(7, 15) == 0
xionluhnis/decnet-scripts | test.py | Python | mit | 1,462 | 0.008208 | #!/usr/bin/env python
## path configuration
caffe_root = './caffe'
script_path = '.'
caffe_model = script_path + '/soccer.prototxt'
caffe_weight = script_path + '/snapshot/superlatefusion_iter_15000.caffemodel'
caffe_inference_weight = script_path + '/superlatefusion_inference.caffemodel'
### start generate caffemodel
print 'start generating BN-testable caffemodel'
print 'caffe_root: %s' % caffe_root
print 'script_path: %s' % script_path
print 'caffe_model: %s' % caffe_model
print 'caffe_weight: %s' % caffe_weight
print 'caffe_inference_weight: %s' % caffe_inference_weight
import numpy | as np
import sys
sys.path.append(caffe_root+'/python')
import caffe
from caffe.proto import caffe_pb2
import cv2
net = caffe.Net(caffe_model, caffe_weight)
net.set_mode_cpu()
net.set_phase_test()
def forward_once(net):
start_ind = 0
end_ind = len(net.layers) - 1
net._forward(start_ind, end_ind)
return {out: net.blobs[out].data for out in net.outputs}
print net.params.keys()
| res = forward_once(net)
layers = ['data', 'data2', 'seg-label', 'input', 'conv1', 'conv2', 'conv3', 'seg-score']
import os
# debug output
for name in layers:
if not os.path.exists('debug/%s' % name):
os.mkdir('debug/%s' % name)
blob = net.blobs[name]
for b in range(0, 16):
for c in range(0, blob.data[b].shape[0]):
cv2.imwrite('debug/%s/%d-%d.png' %(name, b, c), np.squeeze(blob.data[b][c]))
print 'done'
|
offbye/PiBoat | pyboat/piboat.py | Python | apache-2.0 | 2,413 | 0.003374 | #!/usr/bin/python
# -*- encoding: UTF-8 -*-
# SockBoatServer created on 15/8/30 下午3:49
# Copyright 2014 offbye@gmail.com
"""
"""
__author__ = ['"Xitao":<offbye@gmail.com>']
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import traceback
import threading
import os,sys
from pi_pwm import PiPWM
timer_interval = 2
class MyStreamRequestHandlerr(StreamRequestHandler):
def handle(self):
# t = threading.Timer(5.0, self.sayhello)
# t.start()
while True:
try:
data = self.rfile.readline().strip()
if data != '':
print "receive from (%r):%r" % (self.client_address, data)
self.wfile.write(data.upper())
if | data == "gps":
self.wfile.write(get_gps())
elif data == | "reboot":
os.system('reboot')
sys.exit()
elif data == "halt":
os.system("shutdown -r -t 5 now")
sys.exit()
elif data == "rtsp":
os.system("raspivid -o - -w 640 -h 360 -t 9999999 |cvlc -vvv stream:///dev/stdin --sout '#rtp{sdp=rtsp://:8554/}' :demux=h264 & ")
elif data[0:2] == "m1":
print("---" + data.upper())
pwm.motor_set(float(data.split(",")[1]))
elif data[0:2] == "s1":
print("---" + data.upper())
pwm.servo1_set(float(data.split(",")[1]))
except:
traceback.print_exc()
self.finish()
break
def sayhello(self):
print "hello"
self.wfile.write("hello")
global t #Notice: use global variable!
t = threading.Timer(2.0, self.sayhello)
t.start()
def get_gps():
    """Stub GPS reader: logs the call and returns a placeholder position.

    NOTE(review): always returns the literal "lat,lat" — presumably meant
    to be replaced by a real GPS lookup; confirm before relying on it.
    """
    print "-----gps"
    return "lat,lat"
if __name__ == "__main__":
    host = "" # host name: an IP, a name like "localhost", or "" for all interfaces
    port = 9999 # TCP port to listen on
    addr = (host, port)
    try:
        pwm = PiPWM()
        # ThreadingTCPServer inherits from ThreadingMixIn and TCPServer:
        #class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
        server = ThreadingTCPServer(addr, MyStreamRequestHandlerr)
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    # Release the socket and stop PWM output on shutdown.
    server.server_close()
    pwm.stop()
|
mkaplenko/mobilmoney_sms | client.py | Python | gpl-2.0 | 1,933 | 0.001046 | # -*- coding: utf-8 -*-
__author__ = 'mkaplenko'
import httplib
import time
class MobilMoneySms(object):
    """Value object describing one outgoing SMS (recipient, text, sync id)."""

    def __init__(self, phone_to, message):
        # Keep recipient and body exactly as supplied by the caller.
        self.phone_to, self.message = phone_to, message
        # Wall-clock derived request identifier (centiseconds since epoch).
        self.sync = int(time.time() * 100)
class MobilMoneySmsClient(object):
    """Minimal HTTP client for the mobilmoney.ru SMS gateway.

    Usage: register_sms() a MobilMoneySms-like object, call send_sms(),
    then read the gateway reply via the `answer` property.
    """
    connection_host = 'gate.mobilmoney.ru'
    response = None
    sms = None
    sync = 1

    def __init__(self, login, password, originator):
        self.login = login
        self.password = password
        self.originator = originator

    def register_sms(self, sms_instance):
        # One SMS at a time; remembered for request_body()/send_sms().
        self.sms = sms_instance

    def request_body(self):
        """Build the UTF-8 encoded XML payload for the SendSMSFull method."""
        data_kwargs = {
            'login': self.login,
            'password': self.password,
            'originator': self.originator,
            'phone_to': self.sms.phone_to,
            'message': self.sms.message,
            # str() instead of unicode(): sync is an int, so this is
            # ASCII-safe and keeps the method working on Python 2 and 3.
            'sync': str(self.sms.sync)
        }
        data = u'''
        <?xml version="1.0" encoding="utf-8"?>
        <request method="SendSMSFull">
            <login>{login}</login>
            <pwd>{password}</pwd>
            <originator>{originator}</originator>
            <phone_to>{phone_to}</phone_to>
            <message>{message}</message>
            <sync>{sync}</sync>
        </request>
        '''.format(**data_kwargs).encode('utf-8')
        return data

    def send_sms(self):
        """POST the registered SMS to the gateway and keep the response."""
        connection = httplib.HTTPConnection(self.connection_host)
        connection.request('POST', '/', self.request_body())
        self.response = connection.getresponse()

    @property
    def answer(self):
        # Raw gateway reply body, or None if send_sms() was never called.
        return self.response.read() if self.response else None
if __name__ == '__main__':
    # Manual smoke test against the live gateway (phone number reconstructed
    # from the garbled original; replace with a real recipient before use).
    sms = MobilMoneySms('+79151234567', u'Привет мир! Я тестирую смс!')
    client = MobilMoneySmsClient('my_login', 'my_password', 'my_originator_name')
    client.register_sms(sms)
    client.send_sms()
    print(client.answer)
|
emgirardin/compassion-modules | sbc_compassion/tools/zbar_wrapper.py | Python | agpl-3.0 | 2,203 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emmanuel Girardin <emmanuel.girardin@outlook.com>
#
# The licence is in the file __openerp__.py
#
##############################################################################
""" This wrapper allows to scan QRCodes thanks to the ZBar | library. When no
ZBar is detected, it apply a few filter on the input image and try the
scanning agai | n. This technique reduces the number of false negative."""
import zbar
import cv2 # we use openCV to repair broken QRCodes.
from PIL import Image
def scan_qrcode(filename):
    """Scan *filename* for a QR code; return a result dict or None."""
    symbol = _decode(filename)
    if not symbol:
        # Nothing decoded, even after the repair pass inside _decode().
        return None
    # Map the ZBar symbol onto the dictionary layout our software expects.
    return {
        "data": symbol.data,
        "format": symbol.type,
        "points": symbol.location,
        "raw": symbol.data,
    }
def _scan(img, scanner=None):
    """Scan a grayscale OpenCV image with ZBar; return the last symbol or None.

    A pre-configured *scanner* can be passed in to avoid rebuilding one
    per call (see _decode(), which scans twice with the same reader).
    """
    # convert cv image to raw data
    pil = Image.fromarray(img)
    width, height = pil.size
    raw = pil.tostring()
    # wrap image data ('Y800' = 8-bit grayscale)
    image = zbar.Image(width, height, 'Y800', raw)
    if not scanner:
        # create a reader
        scanner = zbar.ImageScanner()
        # configure the reader
        scanner.parse_config('enable')
    # scan the image for barcodes
    scanner.scan(image)
    # extract results; if several symbols are present, the last one wins
    qrcode = None
    for symbol in image:
        qrcode = symbol
    return qrcode
def _decode(filename):
    """Load *filename*, scan for a QR code, retrying once on a cleaned image.

    The retry applies a morphological opening (erosion then dilation) that
    removes small speckle noise which can break the finder patterns.
    """
    # create a reader
    scanner = zbar.ImageScanner()
    # configure the reader
    scanner.parse_config('enable')
    # obtain image data
    img = cv2.imread(filename)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    qrcode = _scan(img, scanner=scanner)
    if not qrcode:
        # No QR found, so we try again after an opening operation
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        qrcode = _scan(img, scanner=scanner)
    return qrcode
|
t3dev/odoo | addons/digest/models/digest_tip.py | Python | gpl-3.0 | 784 | 0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
from odoo.tools.translate import html_translate
class DigestTip(models.Model):
    """A single tip that can be embedded into digest e-mails."""
    _name = 'digest.tip'
    _description = 'Digest Tips'
    _order = 'sequence'

    sequence = fields.Integer(
        'Sequence', default=1,
        help='Used to display digest tip in email template base on order')
    user_ids = fields.Many2many(
        'res.users', string='Recipients',
        help='Users having already received this tip')
    tip_description = fields.Html('Tip description', translate=html_translate)
    group_id = fields.Many2one(
        'res.groups', string='Authorized Group',
        default=lambda self: self.env.ref('base.group_user'))
|
CarlFK/veyepar | dj/main/models.py | Python | mit | 22,252 | 0.011639 | # models.py
import datetime
import os
import random
import re
import socket
from functools import reduce

from django import forms
from django import urls
from django.db import models
from django.db.models.signals import pre_save

from .titlecase import titlecase
from .unique_slugify import unique_slugify
def time2s(time):
    """ given 's.s' or 'h:m:s.s' returns s.s """
    # Empty string / None -> 0.0, matching the previous behaviour.
    if not time:
        return 0.0
    # Fold the colon-separated fields left-to-right: each field shifts the
    # running total by a factor of 60 before being added.
    seconds = 0.0
    for field in time.split(':'):
        seconds = seconds * 60 + float(field)
    return seconds
class Client(models.Model):
    """An organization Veyepar produces videos for, with its publishing
    account keys and encoding assets."""
    sequence = models.IntegerField(default=1)
    active = models.BooleanField(default=True,
        help_text="Turn off to hide from UI.")
    name = models.CharField(max_length=135)
    slug = models.CharField(max_length=135, blank=True, null=False,
        help_text="dir name to store input files", )
    contacts = models.CharField(max_length=300, blank=True,
        help_text='emails of people putting on the event.')
    description = models.TextField(blank=True)
    tags = models.TextField(null=True,blank=True,)
    tweet_prefix = models.CharField(max_length=30, blank=True, null=True)
    bucket_id = models.CharField(max_length=30, blank=True, null=True)
    category_key = models.CharField(max_length=30, blank=True, null=True,
        help_text = "Category for Richard")
    # video encoding assets
    template_mlt = models.CharField(max_length=60, null=True,
        default="template.mlt",
        help_text='template to make cutlist mlt from.')
    title_svg = models.CharField(max_length=60, null=True,
        default="title.svg",
        help_text='template for event/title/authors title slide.')
    preroll = models.CharField(max_length=335, blank=True,
        help_text="name of video to prepend (not implemented)")
    postroll = models.CharField(max_length=335, blank=True,
        help_text="name of video to postpend (not implemented)")
    credits = models.CharField(max_length=30, blank=True,
        default="ndv-169.png",
        help_text='added to end, store in assets dir')
    # remote accounts to post to
    host_user = models.CharField(max_length=30, blank=True, null=True,
        help_text = "depricated - do not use.")
    youtube_id = models.CharField(max_length=10, blank=True, null=True,
        help_text = "key to lookup user/pw/etc from pw store" )
    archive_id = models.CharField(max_length=10, blank=True, null=True)
    vimeo_id = models.CharField(max_length=10, blank=True, null=True)
    blip_id = models.CharField(max_length=10, blank=True, null=True)
    rax_id = models.CharField(max_length=10, blank=True, null=True)
    richard_id = models.CharField(max_length=10, blank=True, null=True)
    email_id = models.CharField(max_length=10, blank=True, null=True)
    tweet_id = models.CharField(max_length=10, blank=True, null=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Fix: reverse()'s second positional parameter is `urlconf`, not the
        # URL arguments, so the slug must be passed explicitly via args=.
        return urls.reverse('client', args=[self.slug])

    class Meta:
        ordering = ["sequence"]
class Location(models.Model):
    """A recording room, with file-location and A/V adjustment metadata."""
    sequence = models.IntegerField(default=1)
    active = models.BooleanField( default=True,
        help_text="Turn off to hide from UI.")
    default = models.BooleanField(default=True,
        help_text="Adds this loc to new Clients.")
    name = models.CharField(max_length=135,
        help_text="room name")
    slug = models.CharField(max_length=135, blank=True, null=False,
        help_text="dir name to store input files")
    dirname = models.CharField(max_length=135, blank=True,
        help_text="path to raw files. overrieds show/slug.")
    channelcopy = models.CharField(max_length=2, blank=True,
        help_text='audio adjustment for this room')
    hours_offset = models.IntegerField(blank=True, null=True,
        help_text='Adjust for bad clock setting')
    description = models.TextField(blank=True)
    # Optional geographic coordinates of the room.
    lon = models.FloatField(null=True, blank=True )
    lat = models.FloatField(null=True, blank=True )
    def natural_key(self):
        # NOTE(review): Django convention is for natural_key() to return a
        # tuple; the bare name works for display but not for serializer
        # round-trips -- confirm intended usage.
        return self.name
    def __str__(self):
        return "%s" % ( self.name )
    class Meta:
        ordering = ["name"]
ANN_STATES=((1,'preview'),(2,'review'),(3,'approved'))  # announcement workflow states
class Show(models.Model):
    """One event/conference of a Client, spanning one or more Locations."""
    client = models.ForeignKey(Client)
    locations = models.ManyToManyField(Location,
        limit_choices_to={'active': True},
        blank=True)
    sequence = models.IntegerField(default=1)
    active = models.BooleanField( default=True,
        help_text="Turn off to hide from UI.")
    name = models.CharField(max_length=135)
    slug = models.CharField(max_length=135, blank=True, null=False,
        help_text="dir name to store input files")
    category_key = models.CharField(max_length=30, blank=True, null=True,
        help_text = "Category for Richard")
    youtube_playlist_id = models.CharField(max_length=50, blank=True, null=True,
        help_text = "Playlist ID for YouTube")
    tags = models.TextField(null=True,blank=True,)
    description = models.TextField(blank=True)
    conf_url = models.CharField(max_length=200, null=True, blank=True)
    schedule_url = models.CharField(max_length=235, null=True, blank=True)
    # Default is ANN_STATES[1][0] == 2, i.e. 'review'.
    announcement_state = models.IntegerField(null=True, blank=True,
        choices=ANN_STATES, default=ANN_STATES[1][0], )
    @property
    def client_name(self):
        # Returns the related Client instance (rendered via its __str__).
        return self.client
    def __str__(self):
        return "%s: %s" % ( self.client_name, self.name )
    # NOTE(review): @models.permalink was removed in Django 2.1; this only
    # works on older Django versions -- confirm the project's Django pin.
    @models.permalink
    def get_absolute_url(self):
        return ('episode_list', [self.client.slug,self.slug,])
    class Meta:
        ordering = ["sequence"]
class Raw_File(models.Model):
    """One raw recorded clip (e.g. a .dv file) captured in a Location
    during a Show."""
    location = models.ForeignKey(Location)
    show = models.ForeignKey(Show)
    filename = models.CharField(max_length=135,help_text="filename.dv")
    filesize = models.BigIntegerField(default=1,help_text="size in bytes")
    start = models.DateTimeField(null=True, blank=True,
        help_text='when recorded (should agree with file name and timestamp)')
    duration = models.CharField(max_length=11, blank=True, )
    end = models.DateTimeField(null=True, blank=True)
    trash = models.BooleanField(default=False,
        help_text="This clip is trash")
    ocrtext = models.TextField(null=True,blank=True)
    comment = models.TextField(blank=True)
    def __next__(self):
        """
        gets the next clip in the room.
        """
        # NOTE(review): despite the __next__ name this is not a standard
        # iterator -- it returns None instead of raising StopIteration
        # when there is no later clip.
        rfs = Raw_File.objects.filter(location=self.location,
                                      start__gt=self.start,
                                      ).order_by('start','id')
        # id__gt=self.id).order_by('start','id')
        if rfs:
            rf=rfs[0]
        else:
            rf=None
        return rf
def basename(self):
# strip the extension
# good for making 1-2-3/foo.png from 1-2-3/foo.dv
raise "homey don't play that no more."
return os.path.splitext(self.filename)[0]
    def base_url(self):
        """ Returns the url for the file, minus the MEDIA_URL and extension """
        return "%s/%s/dv/%s/%s" % (self.show.client.slug,
                                   self.show.slug,
                                   self.location.slug,
                                   self.filename)
    @property
    def get_adjusted_start(self):
        # Start time corrected by the room's clock offset (if any).
        return self.start + datetime.timedelta(
            hours = 0 if self.location.hours_offset is None
            else self.location.hours_offset )
    @property
    def get_adjusted_end(self):
        # End time corrected by the room's clock offset (if any).
        return self.end + datetime.timedelta(
            hours = 0 if self.location.hours_offset is None
            else self.location.hours_offset )
    def get_start_seconds(self):
        # NOTE(review): time2s() expects an 'h:m:s.s' string, but start is
        # declared as a DateTimeField -- confirm what is actually stored.
        return time2s( self.start )
    def get_end_seconds(self):
        return time2s( self.end )
    def get_seconds(self):
        # return durration in seconds (float)
        delta = self.end - self.start
        seconds = delta.days*24*60*60 + delta.seconds
        return seconds
    def get_minutes(self):
        # return durration in minutes (float)
        return self.get_seconds()/60.0
    def __str__(self):
        return self.filename
@models.per |
xybydy/kirilim | utils.py | Python | gpl-2.0 | 934 | 0 | import sys
from time import sleep
from colored import stylize, fg, attr
def flush(msg, err=None, fast=None, wait=0, code='reg'):
    """Print *msg* with a colored "[+]"/"[-]" prefix.

    err  -- truthy: error styling ("[-]", bold red); falsy: "[+]" styled
            by *code*.
    fast -- truthy: print the whole message at once; falsy: type it out
            character by character with *wait* seconds between characters.
    wait -- per-character delay (seconds) used in the slow mode.
    code -- style key ('reg', 'blue', ...) used for non-error output.
    """
    codes = dict(
        error=fg('red') + attr('bold'),
        reg=fg(28) + attr('bold'),
        blue=fg('blue')
    )
    if err:
        if fast:
            print(stylize('\n[-] {0}'.format(msg), codes['error']), end='')
        else:
            sys.stdout.write(stylize('\n[-] ', codes['error']))
            for char in msg:
                sys.stdout.write(stylize('%s' % char, codes['error']))
                sys.stdout.flush()
                sleep(wait)
    else:
        if fast:
            print(stylize('\n[+] {0}'.format(msg), codes[code]), end='')
        else:
            print(stylize('\n[+] ', codes[code]), end='')
            for char in msg:
                sys.stdout.write(stylize('%s' % char, codes[code]))
                sys.stdout.flush()
                sleep(wait)
|
Ruide/angr-dev | angr-management/angrmanagement/ui/widgets/qstring_table.py | Python | bsd-2-clause | 3,604 | 0.001942 |
from PySide.QtGui import QTableWidget, QTableWidgetItem, QColor, QAbstractItemView
from PySide.QtCore import Qt
from angr.analyses.cfg.cfg_fast import MemoryData
from ...utils import filter_string_for_display
class QStringTableItem(QTableWidgetItem):
    """Table-row wrapper around one MemoryData string entry."""
    def __init__(self, mem_data, *args, **kwargs):
        super(QStringTableItem, self).__init__(*args, **kwargs)
        self._mem_data = mem_data  # type: MemoryData
    def widgets(self):
        """
        :return: a list of QTableWidgetItem objects
        :rtype: list
        """
        str_data = self._mem_data
        address = "%#x" % str_data.address
        length = "%d" % str_data.size
        content = filter_string_for_display(str_data.content)
        # One read-only cell per column: Address, Length, String.
        widgets = [
            QTableWidgetItem(address),
            QTableWidgetItem(length),
            QTableWidgetItem(content),
        ]
        for w in widgets:
            w.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
        return widgets
class QStringTable(QTableWidget):
    """Table of CFG string references, optionally filtered to one function."""

    def __init__(self, parent, selection_callback=None):
        super(QStringTable, self).__init__(parent)

        self._selected = selection_callback

        header_labels = [ 'Address', 'Length', 'String' ]

        self.setColumnCount(len(header_labels))
        self.setHorizontalHeaderLabels(header_labels)
        self.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.setShowGrid(False)
        self.verticalHeader().setVisible(False)
        self.verticalHeader().setDefaultSectionSize(24)
        self.setHorizontalScrollMode(self.ScrollPerPixel)

        self._cfg = None
        self._function = None
        self.items = [ ]

        # self.itemDoubleClicked.connect(self._on_string_selected)
        self.cellDoubleClicked.connect(self._on_string_selected)

    #
    # Properties
    #

    @property
    def cfg(self):
        return self._cfg

    @cfg.setter
    def cfg(self, v):
        self._cfg = v
        self.reload()

    @property
    def function(self):
        return self._function

    @function.setter
    def function(self, v):
        if v is not self._function:
            self._function = v
            self.reload()

    #
    # Public methods
    #

    def reload(self):
        """Rebuild the table rows from the current CFG/function selection."""
        current_row = self.currentRow()
        self.clearContents()

        if self._cfg is None:
            return

        self.items = [ ]
        for f in self._cfg.memory_data.values():
            if f.sort == 'string':
                if self._function is None:
                    self.items.append(QStringTableItem(f))
                else:
                    # Keep the string only if it is referenced from a block
                    # of the selected function.
                    for irsb_addr, _, _ in f.refs:
                        if irsb_addr in self._function.block_addrs_set:
                            self.items.append(QStringTableItem(f))
                            break

        items_count = len(self.items)
        self.setRowCount(items_count)

        for idx, item in enumerate(self.items):
            for i, it in enumerate(item.widgets()):
                self.setItem(idx, i, it)

        # Try to keep the previously selected row after a reload.
        if 0 <= current_row < len(self.items):
            self.setCurrentCell(current_row, 0)

        # Hide while resizing the columns to avoid flicker.
        self.setVisible(False)
        self.resizeColumnsToContents()
        self.setVisible(True)

    #
    # Event handlers
    #

    def _on_string_selected(self, *args):
        """Pass the double-clicked item (or None) to the registered callback."""
        selected_index = self.currentRow()
        if 0 <= selected_index < len(self.items):
            selected_item = self.items[selected_index]
        else:
            selected_item = None
        if self._selected is not None:
            self._selected(selected_item)
|
hanipcode/norinproject | build/bdist.win-amd64/winexe/temp/wx._gdi_.py | Python | mit | 358 | 0.011173 |
def __load():
    # py2exe bootstrap stub: locate and load the real C extension
    # (wx._gdi_.pyd) from the directory that holds the frozen archive.
    import imp, os, sys
    try:
        dirname = os.path.dirname(__loader__.archive)
    except NameError:
        # Not running from a frozen zip archive; fall back to sys.prefix.
        dirname = sys.prefix
    path = os.path.join(dirname, 'wx._gdi_.pyd')
    #print "py2exe extension module", __name__, "->", path
    mod = imp.load_dynamic(__name__, path)
    ## mod.frozen = 1
__load()
del __load
|
terrence2/OpenActuator | OpenActuator/app_a/diagnostic_led.py | Python | gpl-3.0 | 838 | 0.002387 | import machine
import time
DIAGNOSTIC_LED = None
try:
    # Optional board config: the file holds the LED pin number; a negative
    # value means the LED is wired active-low (signal must be inverted).
    with open('config/diagnostic_led.pin', 'r') as fp:
        invert = False
        value = int(fp.read())
        if value < 0:
            value = -value
            invert = True
        DIAGNOSTIC_LED = machine.Signal(value, machine.Pin.OUT, invert=invert)
        DIAGNOSTIC_LED.off()
except:
    # Best effort: boards without the config file (or with a bad value)
    # simply run without a diagnostic LED. The bare except is deliberate
    # so device boot never fails because of this optional feature.
    pass
def blink_forever(cycle_period_ms):
    """Blink the diagnostic LED endlessly; never returns."""
    while True:
        blink_once(cycle_period_ms)
def blink_n(cycle_period_ms, count):
    """Blink the diagnostic LED *count* times (one full cycle per blink)."""
    remaining = count
    while remaining > 0:
        blink_once(cycle_period_ms)
        remaining -= 1
def blink_once(cycle_period_ms):
    """One blink cycle: LED on for half the period, then off for half.

    Sleeps even when no LED is configured, so timing stays consistent.
    time.sleep_ms() is MicroPython-specific.
    """
    half_period = cycle_period_ms // 2
    if DIAGNOSTIC_LED is not None:
        DIAGNOSTIC_LED.on()
    time.sleep_ms(half_period)
    if DIAGNOSTIC_LED is not None:
        DIAGNOSTIC_LED.off()
    time.sleep_ms(half_period)
|
ideascube/ideascube | ideascube/tests/test_tags_command.py | Python | agpl-3.0 | 4,017 | 0.000747 | from django.core.management import call_command
import pytest
from ideascube.mediacenter.tests.factories import DocumentFactory
from taggit.models import Tag
pytestmark = pytest.mark.django_db
# Tests for the "tags" management command: count / rename / replace /
# delete / list sub-commands.
def test_count_should_count_usage(capsys):
    DocumentFactory.create_batch(size=4, tags=['tag1'])
    call_command('tags', 'count', 'tag1')
    out, err = capsys.readouterr()
    assert '4 object(s)' in out
def test_rename_should_rename_tag():
    doc = DocumentFactory(tags=['tag1'])
    tag1 = Tag.objects.get(name='tag1')
    call_command('tags', 'rename', 'tag1', 'tag2')
    assert not Tag.objects.filter(name='tag1')
    assert 'tag2' in doc.tags.names()
    assert 'tag1' not in doc.tags.names()
    # rename keeps the same Tag row (same pk), only the name changes.
    assert tag1.id == Tag.objects.get(name='tag2').id
def test_rename_should_exit_on_non_existing_tag():
    DocumentFactory(tags=['tag1'])
    with pytest.raises(SystemExit):
        call_command('tags', 'rename', 'tag3', 'tag2')
    assert Tag.objects.filter(name='tag1')
def test_rename_should_exit_if_new_name_already_exists():
    DocumentFactory(tags=['tag1'])
    Tag.objects.create(name='tag2')
    with pytest.raises(SystemExit):
        call_command('tags', 'rename', 'tag1', 'tag2')
    assert Tag.objects.filter(name='tag1')
    assert Tag.objects.filter(name='tag2')
def test_replace_should_replace_and_delete_tag():
    tag1 = Tag.objects.create(name='tag1')
    tag2 = Tag.objects.create(name='tag2')
    doc = DocumentFactory(tags=[tag1])
    call_command('tags', 'replace', 'tag1', 'tag2')
    # replace retags the documents and removes the old Tag row.
    assert not Tag.objects.filter(name='tag1')
    assert tag1 not in doc.tags.all()
    assert tag2 in doc.tags.all()
    assert tag1.id != tag2.id
def test_replace_should_create_if_needed():
    doc = DocumentFactory(tags=['tag1'])
    call_command('tags', 'replace', 'tag1', 'tag2')
    tag2 = Tag.objects.get(name='tag2')
    assert not Tag.objects.filter(name='tag1')
    assert 'tag1' not in doc.tags.names()
    assert tag2 in doc.tags.all()
def test_delete_should_delete_tag():
    tag1 = Tag.objects.create(name='tag1')
    call_command('tags', 'delete', 'tag1')
    assert not Tag.objects.filter(name='tag1')
    assert not Tag.objects.filter(pk=tag1.pk)
def test_delete_should_exit_on_non_existing_tag():
    with pytest.raises(SystemExit):
        call_command('tags', 'delete', 'tag1')
def test_list_should_list_tags_and_slugs(capsys):
    tag = Tag.objects.create(name='Some Tag')
    call_command('tags', 'list',)
    out, err = capsys.readouterr()
    assert tag.name in out
    assert tag.slug in out
def test_sanitize_tags():
    """sanitize must lowercase tags, strip trailing punctuation, merge the
    resulting duplicates onto each document, and drop empty leftovers."""
    foo = Tag.objects.create(name='foo')
    Foo = Tag.objects.create(name='Foo')
    Bar = Tag.objects.create(name='Bar')  # Create a tag with upper case first.
    bar = Tag.objects.create(name='bar')
    bar_ = Tag.objects.create(name='bar;')
    Bar_ = Tag.objects.create(name='Bar;')
    tag_to_delete = Tag.objects.create(name=':')
    clean = Tag.objects.create(name="Other:")
    half_clean1 = Tag.objects.create(name="Other:Foo,")
    half_clean2 = Tag.objects.create(name="Other:foo")
    doc1 = DocumentFactory(tags=[foo, bar, clean])
    doc2 = DocumentFactory(tags=[foo, Bar, clean, half_clean2])
    doc3 = DocumentFactory(tags=[Foo, bar])
    doc4 = DocumentFactory(tags=[Foo, Bar_, half_clean1, half_clean2])
    doc5 = DocumentFactory(tags=[Foo, foo, bar_, Bar])
    doc6 = DocumentFactory(tags=[Foo, foo, Bar_, tag_to_delete])
    call_command('tags', 'sanitize')
    all_tag_names = list(Tag.objects.all().order_by('name')
                         .values_list('name', flat=True))
    assert all_tag_names == ['bar', 'foo', 'other', 'other:foo']
    assert sorted(doc1.tags.names()) == ['bar', 'foo', 'other']
    assert sorted(doc2.tags.names()) == ['bar', 'foo', 'other', 'other:foo']
    assert sorted(doc3.tags.names()) == ['bar', 'foo']
    assert sorted(doc4.tags.names()) == ['bar', 'foo', 'other:foo']
    assert sorted(doc5.tags.names()) == ['bar', 'foo']
    assert sorted(doc6.tags.names()) == ['bar', 'foo']
|
guker/spear | config/grid/para_training_local.py | Python | gpl-3.0 | 1,092 | 0.032051 | #!/usr/bin/env python
# setup of the grid parameters
# default queue used for training
training_queue = { 'queue':'q1dm', 'memfree':'16G', 'pe_opt':'pe_mth 2', 'hvmem':'8G', 'io_big':True }
# the queue that is used solely for the final ISV training step
isv_training_queue = { 'queue':'q1wm', 'memfree':'32G', 'pe_opt':'pe_mth 4', 'hvmem':'8G' }
# number of audio files that one job should preprocess
number_of_audio_files_per_job = 1000
preprocessing_queue = {}
# number of features that one job should extract
number_of_features_per_job = 600
extraction_queue = { 'queue':'q1d', 'memfree':'8G' }
# number of features that one job should project
number_of_projections_per_job = 600
projection_queue = { 'queue':'q1d', 'hvmem':'8G', 'memfree':'8G' }
# number of models that one should enroll
number_of_models_per_enrol_job = 20
enrol_queue = { 'queue':'q1d', 'memfree':'4G', 'io_big':True }
# number of models that one score job should process
number_of_models_per_score_job = 20
score_queue = { 'queue':'q1d', 'memfree':'4G', 'io_big':True }
# run everything locally rather than submitting to the SGE grid
grid_type = 'local'
|
ir0nb8t/tutorials | automateTheBoringStuff/vailidateInput.py | Python | gpl-3.0 | 344 | 0 | while True:
pr | int('Enter your age:')
age = input()
if age.isdecimal():
break
print('Please enter a number for your age.')
while True:
print('Select a new password (letters and numbers only):')
password = input()
if password.isalnum():
break
print('Passwords can onl | y have letters and numbers.')
|
aepifanov/mos_mu | modules/pkgs_verify_md5.py | Python | apache-2.0 | 4,932 | 0.003447 | #!/usr/bin/env python
from ansible.module_utils.basic import AnsibleModule
from subprocess import Popen, PIPE
from os import path
import re
import yaml
def parse_verify_output(output_lines, pkg_name, pkg_ver=None,
                        ex_re_list=None, cmd_md5sum=False):
    """Parse `rpm --verify` / `dpkg --verify` / `md5sum -c` output lines.

    Returns a list of {'file': <path>, 'details': <flags or 'FAILED'>} for
    each file whose MD5 checksum mismatches, skipping config files and any
    path matched by an exclusion rule.

    ex_re_list items are either bare regexes (str, or list of str) or dicts
    {'package_name': ..., ['package_version': ...,] 'exclude_regex': ...}
    that only apply when they match this package (and version).

    Fix: md5sum output ("<path>: FAILED") was previously reversed like the
    rpm/dpkg format, which swapped file/details in the result and matched
    the exclusion regexes against 'FAILED' instead of the path.
    """
    result = []
    for line in output_lines:
        if cmd_md5sum:
            # md5sum -c output: "<path>: FAILED" -> [path, 'FAILED'].
            parts = line.split(': ')
        else:
            # rpm/dpkg output: "<flags> [c] <path>"; reverse so the path is
            # at parts[0] and the flags column at parts[-1].
            parts = line.split()
            parts.reverse()
        if ex_re_list:
            skip_line = False
            for elem in ex_re_list:
                regex = None
                if type(elem) is dict:
                    # Package-scoped rule (optionally version-scoped too).
                    if elem['package_name'] == pkg_name:
                        if 'package_version' in elem:
                            if elem['package_version'] == pkg_ver:
                                regex = elem['exclude_regex']
                        else:
                            regex = elem['exclude_regex']
                else:
                    regex = elem
                if regex:
                    if type(regex) is list:
                        for r in regex:
                            if re.search(r, parts[0]):
                                skip_line = True
                                break
                    elif re.search(regex, parts[0]):
                        skip_line = True
                if skip_line:
                    break
            if skip_line:
                continue
        # 'c' - config file flag: config files are expected to change.
        if len(parts) == 3 and parts[1] == 'c':
            continue
        # Third character of the rpm/dpkg flags column is '5' when the MD5
        # checksum differs; md5sum -c output only ever lists failures.
        if cmd_md5sum or parts[-1][2] == '5':
            file = parts[0]
            verify_details = parts[-1]
            result.append({'file': file, 'details': verify_details})
    return result
def main():
    """Verify MD5 checksums of all installed packages' files.

    Builds a per-package list of files whose on-disk checksum differs from
    the package database (rpm on CentOS, dpkg/md5sums on Ubuntu), honours
    built-in plus user-supplied exclusion filters, and reports the result
    through the Ansible module protocol.
    """
    # Paths expected to change at runtime (configs, compiled files, etc.).
    exclude = ['^\.?/?etc/',
               '^\.?/?root/',
               '\.py[co]$',
               '/usr/share/openstack-dashboard/static/dashboard/manifest.json'
               ]

    module = AnsibleModule(
        argument_spec=dict(
            exclude_filter_file={'required': False, 'default': None}))

    if path.exists('/etc/redhat-release'):
        cmd = 'rpm -qa --qf "%{NAME}\\t%{EPOCH}\\t%{VERSION}\\t%{RELEASE}\\n"'
        distr = 'centos'
    elif path.exists('/usr/bin/lsb_release'):
        cmd = "dpkg-query -W -f='${Package}\\t${Version}\\n'"
        distr = 'ubuntu'

    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    out = out.splitlines()
    err = err.splitlines()
    if p.returncode:
        module.fail_json(msg='command exited non-zero', cmd=cmd,
                         rc=p.returncode, out_lines=out, err_lines=err)

    result = []
    if module.params['exclude_filter_file']:
        with open(module.params['exclude_filter_file'], 'r') as f:
            content = f.read()
        if content:
            exclude += yaml.load(content)

    for line in out:
        item = {}
        md5_errors = []
        cmd_md5sum = False
        if distr == 'centos':
            pkg_name, pkg_epoch, pkg_version, pkg_release = line.split('\t')
            if pkg_epoch == '0' or pkg_epoch == '(none)':
                pkg_verstr = '%s-%s' % (pkg_version, pkg_release)
            else:
                pkg_verstr = '%s:%s-%s' % (pkg_epoch, pkg_version, pkg_release)
            cmd = 'nice -n 19 ionice -c 3 rpm --verify %s' % pkg_name
        elif distr == 'ubuntu':
            pkg_name, pkg_verstr = line.split('\t')
            dpkg = [s for s in out if s.startswith('dpkg\t')][0]
            dpkg_ver = dpkg.split('\t')[1]
            if dpkg_ver >= '1.17':
                cmd = 'nice -n 19 ionice -c 3 dpkg --verify %s' % pkg_name
            else:
                # Old dpkg cannot verify; check the package's md5sums file
                # directly. Fixes: md5_file is reset every iteration (it
                # could previously be unbound or leak from the previous
                # package), and pkg_name is actually substituted into the
                # path templates (the '%s' was never expanded before).
                md5_file = None
                for tmpl in ('/var/lib/dpkg/info/%s:amd64.md5sums',
                             '/var/lib/dpkg/info/%s.md5sums'):
                    candidate = tmpl % pkg_name
                    if path.exists(candidate):
                        md5_file = candidate
                        break
                if md5_file:
                    cmd = ('cd /; nice -n 19 ionice -c 3 md5sum --quiet -c '
                           '%s 2>&1') % md5_file
                    cmd_md5sum = True
                else:
                    # no md5 file, skipping
                    continue
        item['package_name'] = pkg_name
        item['package_version'] = pkg_verstr
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        verify_out, verify_err = p.communicate()
        verify_out = verify_out.splitlines()
        if verify_out:
            md5_errors = parse_verify_output(
                verify_out, pkg_name,
                pkg_ver=pkg_verstr, ex_re_list=exclude, cmd_md5sum=cmd_md5sum)
        if md5_errors:
            item['md5_errors'] = md5_errors
            result.append(item)
    changed = True if result else False
    module.exit_json(changed=changed, result=result)


if __name__ == '__main__':
    main()
|
uclouvain/OSIS-Louvain | education_group/ddd/service/write/copy_training_service.py | Python | agpl-3.0 | 2,322 | 0.002154 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.db import transaction
from education_group.ddd import command
from education_group.ddd.domain import exception
from educat | ion_group.ddd.domain.training import TrainingIdentity
from ddd.logic.formation_catalogu | e.builder.training_builder import TrainingBuilder
from education_group.ddd.repository import training as training_repository
@transaction.atomic()
def copy_training_to_next_year(copy_cmd: command.CopyTrainingToNextYearCommand) -> 'TrainingIdentity':
    """Copy a training from ``copy_cmd.postpone_from_year`` to the next academic year.

    Fetches the existing training, builds its next-year copy, then persists it:
    the copy is created when no training with that acronym exists yet for the
    next year, and updated otherwise.

    :param copy_cmd: command carrying the acronym and the source year.
    :return: the identity of the training created or updated for the next year.
    """
    # GIVEN
    repository = training_repository.TrainingRepository()
    existing_training = repository.get(
        entity_id=TrainingIdentity(acronym=copy_cmd.acronym, year=copy_cmd.postpone_from_year)
    )
    # WHEN
    training_next_year = TrainingBuilder().copy_to_next_year(existing_training, repository)
    # THEN
    try:
        # The nested atomic() opens a savepoint: if create() fails because the
        # acronym already exists next year, only the savepoint rolls back and
        # the outer transaction can still perform the update() fallback.
        with transaction.atomic():
            identity = repository.create(training_next_year)
    except exception.TrainingAcronymAlreadyExistException:
        identity = repository.update(training_next_year)
    return identity
|
ericmjl/bokeh | bokeh/core/validation/__init__.py | Python | bsd-3-clause | 3,227 | 0.005888 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' The validation module provides the capability to perform integrity
checks on an entire collection of Bokeh models.
To create a Bokeh visualization, the central task is to assemble a collection
model objects from |bokeh.models| into a graph that represents the scene that
should be created in the client. It is possible to to this "by hand", using the
model objects directly. However, to make this process easier, Bokeh provides
higher level interfaces such as |bokeh.plotting| for users.
These interfaces automate common "assembly" steps, to ensure a Bokeh object
graph is created in a consistent, predictable way. However, regardless of what
interface is used, it is possible to put Bokeh models together in ways that are
incomplete, or that do not make sense in some way.
To assist with diagnosing potential problems, Bokeh performs a validation step
when outputting a visualization for display. This module contains error and
warning codes as well as helper functions for defining validation checks.
One use case for warnings is to loudly point users in the right direction
when they accidentally do something that they probably didn't mean to do - this
is the case for EMPTY_LAYOUT for instance. Since warnings don't necessarily
indicate misuse, they are configurable. To silence a warning, use the silence
function provided.
.. code-block:: python
>>> from bokeh.core.validation import silence
>>> from bokeh.core.validation.warnings import EMPTY_LAYOUT
>>> silence(EMPTY_LAYOUT, True)
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .check import check_integrity, silence, silenced
from .decorators import error, warning
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
youlanhai/ExcelToCode | xl2code/writers/lua_writer.py | Python | mit | 2,039 | 0.038744 | # -*- coding: utf-8 -*-
from base_writer import BaseWriter
class LuaWriter(BaseWriter):
    """Serializes sheet data to a Lua module file (Python 2 code: relies on
    ``unicode`` and on ``dict.keys()`` returning a sortable list)."""
    def begin_write(self):
        # Emit the Lua module header before any values are written.
        super(LuaWriter, self).begin_write()
        self.output("module(...)", "\n\n")
    def write_sheet(self, name, sheet):
        # Write one sheet table; the main sheet additionally gets its row
        # count and a sorted key list so the Lua side can iterate it
        # deterministically.
        self.write_value(name, sheet)
        if name == "main_sheet":
            self.write_value("main_length", len(sheet), 0)
            keys = sheet.keys()
            keys.sort()
            self.write_value("main_keys", keys, 0)
        self.flush()
    def write_value(self, name, value, max_indent = None):
        # Emit "name = <value>"; max_indent overrides the pretty-print depth
        # (values nested deeper than max_indent stay on a single line).
        self.write_types_comment(name)
        if max_indent is None:
            max_indent = self.max_indent
        self.output(name, " = ")
        self.write(value, 1, max_indent)
        self.output("\n\n")
        self.flush()
    def write_comment(self, comment):
        # Lua line comment.
        self.output("-- ", comment, "\n")
    def write(self, value, indent = 1, max_indent = 0):
        # Recursively render a Python value as a Lua literal.
        output = self.output
        if value is None:
            return output("nil")
        tp = type(value)
        if tp == bool:
            output("true" if value else "false")
        elif tp == int:
            output("%d" % (value, ))
        elif tp == float:
            output("%g" % (value, ))
        elif tp == str:
            output('"%s"' %(value, ))
        elif tp == unicode:
            # Lua files are written as UTF-8 bytes.
            output('"%s"' % (value.encode("utf-8"), ))
        elif tp == tuple or tp == list:
            # Sequences become Lua array-style tables.
            output("{")
            for v in value:
                self.newline_indent(indent, max_indent)
                self.write(v, indent + 1, max_indent)
                output(", ")
            if len(value) > 0 and indent <= max_indent:
                output("\n")
                self._output(indent - 1, "}")
            else:
                output("}")
        elif tp == dict:
            # Dicts become keyed tables; keys are sorted for stable output.
            output("{")
            keys = value.keys()
            keys.sort()
            for k in keys:
                self.newline_indent(indent, max_indent)
                output("[")
                self.write(k)
                output("] = ")
                self.write(value[k], indent + 1, max_indent)
                output(", ")
            if len(value) > 0 and indent <= max_indent:
                output("\n")
                self._output(indent - 1, "}")
            else:
                output("}")
        else:
            raise TypeError, "unsupported type %s" % (str(tp), )
        return
    def newline_indent(self, indent, max_indent):
        # Start a new indented line only while within the pretty-print depth.
        if indent <= max_indent:
            self.output("\n")
            self._output(indent)
|
e4p/dsub | dsub/providers/google.py | Python | apache-2.0 | 46,657 | 0.004479 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provider for running jobs on Google Cloud Platform.
This module implements job creation, listing, and canceling using the
Google Genomics Pipelines and Operations APIs.
"""
# pylint: disable=g-tzinfo-datetime
from datetime import datetime
import itertools
import json
import os
import re
import socket
import string
import sys
import textwrap
from . import base
from .._dsub_version import DSUB_VERSION
import apiclient.discovery
import apiclient.errors
from ..lib import param_util
from ..lib import providers_util
from oauth2client.client import GoogleCredentials
from oauth2client.client import HttpAccessTokenRefreshError
import pytz
import retrying
_PROVIDER_NAME = 'google'
# Create file provider whitelist.
_SUPPORTED_FILE_PROVIDERS = frozenset([param_util.P_GCS])
_SUPPORTED_LOGGING_PROVIDERS = _SUPPORTED_FILE_PROVIDERS
_SUPPORTED_INPUT_PROVIDERS = _SUPPORTED_FILE_PROVIDERS
_SUPPORTED_OUTPUT_PROVIDERS = _SUPPORTED_FILE_PROVIDERS
# Environment variable name for the script body
SCRIPT_VARNAME = '_SCRIPT'
# Mount point for the data disk on the VM and in the Docker container
DATA_MOUNT_POINT = '/mnt/data'
# Special dsub directories within the Docker container
#
# Attempt to keep the dsub runtime environment sane by being very prescriptive.
# Assume a single disk for everything that needs to be written by the dsub
# runtime environment or the user.
#
# Backends like the Google Pipelines API, allow for the user to set both
# a boot-disk-size and a disk-size. But the boot-disk-size is not something
# that users should have to worry about, so don't put anything extra there.
#
# Put everything meaningful on the data disk:
#
# input: files localized from object storage
# output: files to de-localize to object storage
#
# script: any code that dsub writes (like the user script)
# tmp: set TMPDIR in the environment to point here
#
# workingdir: A workspace directory for user code.
# This is also the explicit working directory set before the
# user script runs.
SCRIPT_DIR = '%s/script' % DATA_MOUNT_POINT
TMP_DIR = '%s/tmp' % DATA_MOUNT_POINT
WORKING_DIR = '%s/workingdir' % DATA_MOUNT_POINT
MK_RUNTIME_DIRS_COMMAND = '\n'.join(
'mkdir --mode=777 -p "%s" ' % dir
for dir in [SCRIPT_DIR, TMP_DIR, WORKING_DIR])
DOCKER_COMMAND = textwrap.dedent("""\
set -o errexit
set -o nounset
# Create runtime directories
{mk_runtime_dirs}
# Write the script to a file and make it executable
echo "${{_SCRIPT}}" > "{script_path}"
chmod u+x "{script_path}"
# Install gsutil if there are recursive copies to do
{install_cloud_sdk}
# Set environment variables for inputs with wildcards
{export_inputs_with_wildcards}
# Set environment variables for recursive input directories
{export_input_dirs}
# Recursive copy input directories
{copy_input_dirs}
# Create the output directories
{mk_output_dirs}
# Set environment variables for recursive output directories
{export_output_dirs}
# Set TMPDIR
export TMPDIR="{tmpdir}"
# DEPRECATED: do not use DATA_ROOT
export DATA_ROOT=/mnt/data
# Set the working directory
cd "{working_dir}"
# Run the user script
"{script_path}"
# Recursive copy output directories
{copy_output_dirs}
""")
# If an output directory is marked as "recursive", then dsub takes over the
# responsibilities of de-localizing that output directory.
#
# If the docker image already has gsutil in it, then we just use it.
# For large numbers of pipelines that utilize the recursive output feature,
# including Cloud SDK in the docker image is generally preferred.
#
# When one is just getting started with their pipeline, adding Cloud SDK
# installation in their docker image should not be a requirement.
INSTALL_CLOUD_SDK = textwrap.dedent("""\
if ! type gsutil; then
apt-get update
apt-get --yes install ca-certificates gcc gnupg2 python-dev python-setuptools
easy_install -U pip
pip install -U crcmod
apt-get --yes install lsb-release
export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)"
echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" >> /etc/apt/sources.list.d/google-cloud-sdk.list
apt-get update && apt-get --yes install curl
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
apt-get update && apt-get --yes install google-cloud-sdk
fi
""")
# Transient errors for the Google APIs should not cause them to fail.
# There are a set of HTTP and socket errors which we automatically retry.
# 429: too frequent polling
# 50x: backend error
TRANSIENT_HTTP_ERROR_CODES = set([429, 500, 503, 504])
# Socket error 104 (connection reset) should also be retried
TRANSIENT_SOCKET_ERROR_CODES = set([104])
# When attempting to cancel an operation that is already completed
# (succeeded, failed, or canceled), the response will include:
# "error": {
# "code": 400,
# "status": "FAILED_PRECONDITION",
# }
FAILED_PRECONDITION_CODE = 400
FAILED_PRECONDITION_STATUS = 'FAILED_PRECONDITION'
# List of Compute Engine zones, which enables simple wildcard expansion.
# We could look this up dynamically, but new zones come online
# infrequently enough, this is easy to keep up with.
# Also - the Pipelines API may one day directly support zone wildcards.
#
# To refresh this list:
# gcloud compute zones list --format='value(name)' \
# | sort | awk '{ printf " '\''%s'\'',\n", $1 }'
_ZONES = [
'asia-east1-a',
'asia-east1-b',
'asia-east1-c',
'asia-northeast1-a',
'asia-northeast1-b',
'asia-northeast1-c',
'asia-southeast1-a',
'asia-southeast1-b',
'australia-southeast1-a',
'australia-southeast1-b',
'australia-southeast1-c',
'europe-west1-b',
'europe-west1-c',
'europe-west1-d',
'europe-west2-a',
'europe-west2-b',
'europe-west2-c',
'europe-west3-a',
'europe-west3-b',
'europe-west3-c',
'southamerica-east1-a',
'southamerica-east1-b',
'southamerica-east1-c',
'us-central1-a',
'us-central1-b',
'us-central1-c',
'us-central1-f',
'us-east1-b',
'us-east1-c',
'us-east1-d',
'us-east4-a',
'us-east4-b',
'us-east4-c',
'us-west1-a',
'us-west1-b',
'us-west1-c',
]
def _get_zones(input_list):
  """Expand zone name patterns into a concrete list of zones.

  This function is intended to provide an easy method for producing a list
  of desired zones for a pipeline to run in.

  The Pipelines API default zone list is "any zone". The problem with
  "any zone" is that it can lead to incurring Cloud Storage egress charges
  if the GCE zone selected is in a different region than the GCS bucket.
  See https://cloud.google.com/storage/pricing#network-egress.

  A user with a multi-region US bucket would want pipelines to run in
  a "us-*" zone; a user with a regional bucket would restrict pipelines
  to a zone in that region. Shorthand such as:

      [ "us-*" ]
      [ "us-central1-*" ]

  expands to the full list of US / us-central1 zones respectively.

  Args:
    input_list: list of zone names/patterns

  Returns:
    A list of zones, with any wildcard zone specifications expanded.
  """
  expanded = []
  for pattern in input_list:
    if not pattern.endswith('*'):
      # Literal zone name: pass it through untouched.
      expanded.append(pattern)
      continue
    # Wildcard: keep every known zone sharing the prefix, in _ZONES order.
    prefix = pattern[:-1]
    expanded.extend(z for z in _ZONES if z.startswith(prefix))
  return expanded
def _print_error(msg):
"""Uti |
ndokter/dsmr_parser | test/experiment_telegram.py | Python | mit | 320 | 0 | from dsmr_parser impor | t telegram_specifications
from dsmr_parser.objects import Telegram
from dsmr_parser.parsers import TelegramParser
from example_telegrams import TELEGRAM_V4_2
parser = TelegramParser(telegram_specifications.V4)
telegram = Telegram(TELEGRAM_V4_2, parser, telegram_spe | cifications.V4)
print(telegram)
|
gamda/gameboard | gameboard/tests/unit_tests.py | Python | mit | 14,863 | 0.010496 | # Copyright (c) 2015 Daniel Garcia
#
# See the file LICENSE.txt for copying permission.
import unittest
import random
from gameboard.gameboard import Gameboard, Direction
from gameboard.coordinate import Coordinate
class TestBoard(unittest.TestCase):
    def setUp(self):
        # Every test gets a fresh, empty 8x8 board.
        self.board = Gameboard()
    def test_64_squares(self):
        # An 8x8 board must hold exactly 64 squares.
        self.assertEqual(len(self.board._squares),64)
    def test_index_raises_TypeError(self):
        # _indexOf only accepts Coordinate values.
        self.assertRaises(TypeError,self.board._indexOf,"notCoordinate")
    # _indexOf maps algebraic coordinates to (row, column) array indices,
    # with row 0 at the top of the board (rank 8) and column 0 at file 'a'.
    def test_index_of_coordinate_corner(self):
        index = self.board._indexOf(Coordinate.a1)
        self.assertEqual(index,(7,0))
    def test_index_of_coordinate_top(self):
        index = self.board._indexOf(Coordinate.c8)
        self.assertEqual(index,(0,2))
    def test_index_of_coordinate_right(self):
        index = self.board._indexOf(Coordinate.h7)
        self.assertEqual(index,(1,7))
    def test_index_of_coordinate_bottom(self):
        index = self.board._indexOf(Coordinate.d1)
        self.assertEqual(index,(7,3))
    def test_index_of_coordinate_left(self):
        index = self.board._indexOf(Coordinate.a5)
        self.assertEqual(index,(3,0))
    def test_index_of_coordinate_center(self):
        index = self.board._indexOf(Coordinate.d3)
        self.assertEqual(index,(5,3))
    # _neighbor_* helpers: a square on the board edge has no neighbor (None)
    # in the off-board direction; interior squares return the adjacent square.
    def test_neighbor_top_with_top_square(self):
        n = self.board._neighbor_top(Coordinate.a8)
        self.assertEqual(n, None)
    def test_neighbor_top(self):
        n = self.board._neighbor_top(Coordinate.d3)
        self.assertEqual(n, Coordinate.d4)
    def test_neighbor_top_right_with_top_square(self):
        n = self.board._neighbor_top_right(Coordinate.b8)
        self.assertEqual(n, None)
    def test_neighbor_top_right_with_right_square(self):
        n = self.board._neighbor_top_right(Coordinate.h5)
        self.assertEqual(n, None)
    def test_neighbor_top_right(self):
        n = self.board._neighbor_top_right(Coordinate.f3)
        self.assertEqual(n, Coordinate.g4)
    def test_neighbor_right_with_right_square(self):
        n = self.board._neighbor_right(Coordinate.h5)
        self.assertEqual(n, None)
    def test_neighbor_right(self):
        n = self.board._neighbor_right(Coordinate.d3)
        self.assertEqual(n, Coordinate.e3)
    def test_neighbor_btm_right_with_btm_square(self):
        n = self.board._neighbor_btm_right(Coordinate.b1)
        self.assertEqual(n, None)
    def test_neighbor_btm_right_with_right_square(self):
        n = self.board._neighbor_btm_right(Coordinate.h5)
        self.assertEqual(n, None)
    def test_neighbor_btm_right(self):
        n = self.board._neighbor_btm_right(Coordinate.f3)
        self.assertEqual(n, Coordinate.g2)
    def test_neighbor_btm_with_btm_square(self):
        n = self.board._neighbor_btm(Coordinate.a1)
        self.assertEqual(n, None)
    def test_neighbor_btm(self):
        n = self.board._neighbor_btm(Coordinate.d3)
        self.assertEqual(n, Coordinate.d2)
    def test_neighbor_btm_left_with_btm_square(self):
        n = self.board._neighbor_btm_left(Coordinate.b1)
        self.assertEqual(n, None)
    def test_neighbor_btm_left_with_left_square(self):
        n = self.board._neighbor_btm_left(Coordinate.a6)
        self.assertEqual(n, None)
    def test_neighbor_btm_left(self):
        n = self.board._neighbor_btm_left(Coordinate.f3)
        self.assertEqual(n, Coordinate.e2)
    def test_neighbor_left_with_left_square(self):
        n = self.board._neighbor_left(Coordinate.a5)
        self.assertEqual(n, None)
    def test_neighbor_left(self):
        n = self.board._neighbor_left(Coordinate.d3)
        self.assertEqual(n, Coordinate.c3)
    def test_neighbor_top_left_with_top_square(self):
        n = self.board._neighbor_top_left(Coordinate.b8)
        self.assertEqual(n, None)
    def test_neighbor_top_left_with_left_square(self):
        n = self.board._neighbor_top_left(Coordinate.a5)
        self.assertEqual(n, None)
    def test_neighbor_top_left(self):
        n = self.board._neighbor_top_left(Coordinate.f3)
        self.assertEqual(n, Coordinate.e4)
    def test_neighbor_in_direction_raises_TypeError(self):
        # Both arguments are type-checked: square must be a Coordinate and
        # direction must be a Direction.
        self.assertRaises(TypeError, self.board.neighbor_in_direction,
                          square = "notCoordinate",
                          direction = Direction.top)
        self.assertRaises(TypeError, self.board.neighbor_in_direction,
                          square = Coordinate.a1,
                          direction = "notDirection")
    # neighbor_in_direction from the interior square d3, one test per
    # Direction value, covering all eight compass directions.
    def test_neighbor_in_direction_top(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.top)
        self.assertEqual(n, Coordinate.d4)
    def test_neighbor_in_direction_top_right(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.top_right)
        self.assertEqual(n, Coordinate.e4)
    def test_neighbor_in_direction_right(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.right)
        self.assertEqual(n, Coordinate.e3)
    def test_neighbor_in_direction_btm_right(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.btm_right)
        self.assertEqual(n, Coordinate.e2)
    def test_neighbor_in_direction_btm(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.btm)
        self.assertEqual(n, Coordinate.d2)
    def test_neighbor_in_directino_btm_left(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.btm_left)
        self.assertEqual(n, Coordinate.c2)
    def test_neighbor_in_direction_left(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.left)
        self.assertEqual(n, Coordinate.c3)
    def test_neighbor_in_direction_top_left(self):
        n = self.board.neighbor_in_direction(Coordinate.d3, Direction.top_left)
        self.assertEqual(n, Coordinate.c4)
    def test_neighbors_raises_TypeError(self):
        # neighbors() only accepts Coordinate values.
        self.assertRaises(TypeError,self.board.neighbors,"notCoordinate")
    # neighbors() returns a dict keyed by every Direction; off-board
    # directions map to None.
    def test_neighbors_corner(self):
        n = self.board.neighbors(Coordinate.a1)
        correct = {Direction.top: Coordinate.a2,
                   Direction.top_right: Coordinate.b2,
                   Direction.right: Coordinate.b1,
                   Direction.btm_right: None,
                   Direction.btm: None,
                   Direction.btm_left: None,
                   Direction.left: None,
                   Direction.top_left: None}
        self.assertEqual(n, correct)
    def test_neighbors_top(self):
        n = self.board.neighbors(Coordinate.c8)
        correct = {Direction.top: None,
                   Direction.top_right: None,
                   Direction.right: Coordinate.d8,
                   Direction.btm_right: Coordinate.d7,
                   Direction.btm: Coordinate.c7,
                   Direction.btm_left: Coordinate.b7,
                   Direction.left: Coordinate.b8,
                   Direction.top_left: None}
        self.assertEqual(n, correct)
    def test_neighbors_right(self):
        n = self.board.neighbors(Coordinate.h7)
        correct = {Direction.top: Coordinate.h8,
                   Direction.top_right: None,
                   Direction.right: None,
                   Direction.btm_right: None,
                   Direction.btm: Coordinate.h6,
                   Direction.btm_left: Coordinate.g6,
                   Direction.left: Coordinate.g7,
                   Direction.top_left: Coordinate.g8}
        self.assertEqual(n, correct)
    def test_neighbors_bottom(self):
        n = self.board.neighbors(Coordinate.d1)
        correct = {Direction.top: Coordinate.d2,
                   Direction.top_right: Coordinate.e2,
                   Direction.right: Coordinate.e1,
                   Direction.btm_right: None,
                   Direction.btm: None,
                   Direction.btm_left: None,
                   Direction.left: Coordinate.c1,
                   Direction.top_left: Coordinate.c2}
        self.assertEqual(n, correct)
def test_neighbors_left(self):
n = self.board.neighbors(Coordinate.a5)
correct = {Direction.top: Coordinate.a6 |
IraKorshunova/kaggle-seizure-prediction | utils/data_splitter.py | Python | mit | 4,982 | 0.004617 | import random
import numpy as np
import itertools, copy
def split_data_with_overlap(data_grouped_by_hour, valid_size, overlap_size, window_size,
                            overlap_interictal=True, overlap_preictal=True, random_state=42):
    """Split hours of EEG data into shuffled train/validation arrays.

    Python 2 code (uses ``xrange`` and list-returning ``zip``).

    Whole hours are assigned to the validation split (fraction ``valid_size``
    of each class, at least one hour per class); the remaining hours form the
    training split. Per class, hours may optionally be re-windowed with
    overlap: the hour's clips are concatenated along the last axis and cut
    into windows of ``window_size`` samples every ``window_size -
    overlap_size`` samples. Labels: interictal -> 0, preictal -> 1.
    """
    random.seed(random_state)
    number_of_test_interictal_hours = max(1, int(len(data_grouped_by_hour['interictal']) * valid_size))
    number_of_test_preictal_hours = max(1, int(len(data_grouped_by_hour['preictal']) * valid_size))
    interictal_hours_indexes = range(len(data_grouped_by_hour['interictal']))
    preictal_hours_indexes = range(len(data_grouped_by_hour['preictal']))
    # Pick validation hours at random; everything else is training.
    valid_interictal_hours_indexes = random.sample(interictal_hours_indexes, number_of_test_interictal_hours)
    valid_preictal_hours_indexes = random.sample(preictal_hours_indexes, number_of_test_preictal_hours)
    train_interictal_hours_indexes = [idx for idx in interictal_hours_indexes if
                                      idx not in set(valid_interictal_hours_indexes)]
    train_preictal_hours_indexes = [idx for idx in preictal_hours_indexes if
                                    idx not in set(valid_preictal_hours_indexes)]
    def fill_data_list(class_label, indexes):
        # Collect clips for the given hours, re-windowing with overlap when
        # enabled for this class.
        overlap = overlap_preictal if class_label == 'preictal' else overlap_interictal
        x = []
        for idx in indexes:
            data_hour = data_grouped_by_hour[class_label][idx]
            if overlap:
                # Concatenate the hour along the time axis, then slide a
                # window with stride (window_size - overlap_size).
                data = np.concatenate(data_hour, axis=2)
                for i in xrange(divmod(data.shape[-1] - overlap_size, window_size - overlap_size)[0]):
                    i *= window_size - overlap_size
                    x.append(data[..., i:i + window_size])
            else:
                x.extend(data_hour)
        return x
    x_valid_interictal = fill_data_list('interictal', valid_interictal_hours_indexes)
    x_valid_preictal = fill_data_list('preictal', valid_preictal_hours_indexes)
    x_train_interictal = fill_data_list('interictal', train_interictal_hours_indexes)
    x_train_preictal = fill_data_list('preictal', train_preictal_hours_indexes)
    # Shuffle samples and labels together within each split.
    x_valid = x_valid_interictal + x_valid_preictal
    y_valid = len(x_valid_interictal) * [0] + len(x_valid_preictal) * [1]
    combined = zip(x_valid, y_valid)
    random.shuffle(combined)
    x_valid[:], y_valid[:] = zip(*combined)
    x_train = x_train_interictal + x_train_preictal
    y_train = len(x_train_interictal) * [0] + len(x_train_preictal) * [1]
    combined = zip(x_train, y_train)
    random.shuffle(combined)
    x_train[:], y_train[:] = zip(*combined)
    return np.array(x_train, dtype='float32'), np.array(y_train, dtype='int8'), \
           np.array(x_valid, dtype='float32'), np.array(y_valid, dtype='int8')
def generate_overlapped_data(data_grouped_by_hour, overlap_size, window_size,
                             overlap_interictal=True, overlap_preictal=True, random_state=42):
    """Build one shuffled dataset from all hours (no train/valid split).

    Python 2 code (uses ``xrange`` and list-returning ``zip``). Same
    per-class overlapped re-windowing as split_data_with_overlap, but every
    hour goes into a single (x, y) pair. Labels: interictal -> 0,
    preictal -> 1.
    """
    random.seed(random_state)
    interictal_hours_indexes = range(len(data_grouped_by_hour['interictal']))
    preictal_hours_indexes = range(len(data_grouped_by_hour['preictal']))
    def fill_data_list(class_label, indexes):
        # Collect clips for the given hours, re-windowing with overlap when
        # enabled for this class.
        overlap = overlap_preictal if class_label == 'preictal' else overlap_interictal
        x = []
        for idx in indexes:
            data_hour = data_grouped_by_hour[class_label][idx]
            if overlap:
                # Concatenate the hour along the time axis, then slide a
                # window with stride (window_size - overlap_size).
                data = np.concatenate(data_hour, axis=2)
                for i in xrange(divmod(data.shape[-1] - overlap_size, window_size - overlap_size)[0]):
                    i *= window_size - overlap_size
                    x.append(data[..., i:i + window_size])
            else:
                x.extend(data_hour)
        return x
    x_interictal = fill_data_list('interictal', interictal_hours_indexes)
    x_preictal = fill_data_list('preictal', preictal_hours_indexes)
    # Shuffle samples and labels together.
    x = x_interictal + x_preictal
    y = len(x_interictal) * [0] + len(x_preictal) * [1]
    combined = zip(x, y)
    random.shuffle(combined)
    x[:], y[:] = zip(*combined)
    return np.array(x, dtype='float32'), np.array(y, dtype='int8')
def split_train_valid_filenames(subject, filenames_grouped_by_hour, random_state=42):
    """Split one subject's hours of recordings into train/validation file lists.

    Roughly 25% of the hours of each class -- and at least one hour per
    class -- go to the validation split; the remaining hours form the
    training split. Hours are shuffled deterministically from
    ``random_state`` before slicing, then each split's per-hour filename
    lists are flattened into one flat list of filenames.

    The 'valid_filnames' key (sic) is kept for backward compatibility.
    """
    rng = np.random.RandomState(random_state)

    # Work on deep copies so the caller's nested lists are never mutated;
    # shuffle preictal first, then interictal, to keep the RNG stream stable.
    preictal_hours = copy.deepcopy(filenames_grouped_by_hour[subject]['preictal'])
    rng.shuffle(preictal_hours)
    interictal_hours = copy.deepcopy(filenames_grouped_by_hour[subject]['interictal'])
    rng.shuffle(interictal_hours)

    # At least one validation hour per class, otherwise ~25% (numpy rounding).
    n_valid_preictal = int(max(1, np.round(0.25 * len(preictal_hours))))
    n_valid_interictal = int(max(1, np.round(0.25 * len(interictal_hours))))

    def _flatten(hours):
        # Collapse a list of per-hour filename lists into one flat list.
        return [filename for hour in hours for filename in hour]

    valid_hours = preictal_hours[:n_valid_preictal] + interictal_hours[:n_valid_interictal]
    train_hours = preictal_hours[n_valid_preictal:] + interictal_hours[n_valid_interictal:]
    return {'train_filenames': _flatten(train_hours),
            'valid_filnames': _flatten(valid_hours)}
|
piskvorky/smart_open | smart_open/smart_open_lib.py | Python | mit | 15,002 | 0.001067 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
"""Implements the majority of smart_open's top-level API.
The main functions are:
* ``parse_uri()``
* ``open()``
"""
import codecs
import collections
import logging
import os
import os.path as P
import pathlib
import urllib.parse
import warnings
import sys
import boto3
#
# This module defines a function called smart_open so we cannot use
# smart_open.submodule to reference to the submodules.
#
import smart_open.local_file as so_file
from smart_open import compression
from smart_open import doctools
from smart_open import transport
from smart_open import utils
#
# For backwards compatibility and keeping old unit tests happy.
#
from smart_open.compression import register_compressor # noqa: F401
from smart_open.utils import check_kwargs as _check_kwargs # noqa: F401
from smart_open.utils import inspect_kwargs as _inspect_kwargs # noqa: F401
logger = logging.getLogger(__name__)
# Fallback text encoding used when the caller does not pass one explicitly.
SYSTEM_ENCODING = sys.getdefaultencoding()

# Maps every text-mode spec to its binary counterpart; modes not listed
# pass through unchanged (looked up via _TO_BINARY_LUT.get(mode, mode)).
_TO_BINARY_LUT = {
    'r': 'rb', 'r+': 'rb+', 'rt': 'rb', 'rt+': 'rb+',
    'w': 'wb', 'w+': 'wb+', 'wt': 'wb', "wt+": 'wb+',
    'a': 'ab', 'a+': 'ab+', 'at': 'ab', 'at+': 'ab+',
}
def _sniff_scheme(uri_as_string):
    """Returns the scheme of the URL only, as a string."""
    #
    # urlsplit mis-parses Windows drive letters ("C:\...") as schemes, so on
    # NT a string without an explicit protocol is treated as a local file.
    #
    if os.name == 'nt' and '://' not in uri_as_string:
        return urllib.parse.urlsplit('file://' + uri_as_string).scheme
    return urllib.parse.urlsplit(uri_as_string).scheme
def parse_uri(uri_as_string):
    """
    Parse the given URI from a string.

    Parameters
    ----------
    uri_as_string: str
        The URI to parse.

    Returns
    -------
    collections.namedtuple
        The parsed URI.

    Notes
    -----
    smart_open/doctools.py magic goes here
    """
    # Route the URI to the transport submodule registered for its scheme and
    # let that submodule do the actual parsing.
    scheme = _sniff_scheme(uri_as_string)
    submodule = transport.get_transport(scheme)
    parsed = submodule.parse_uri(uri_as_string)

    #
    # The conversion to a namedtuple is just to keep the old tests happy while
    # I'm still refactoring.
    #
    Uri = collections.namedtuple('Uri', sorted(parsed))
    return Uri(**parsed)


#
# To keep old unit tests happy while I'm refactoring.
#
_parse_uri = parse_uri

_builtin_open = open
def open(
        uri,
        mode='r',
        buffering=-1,
        encoding=None,
        errors=None,
        newline=None,
        closefd=True,
        opener=None,
        ignore_ext=False,
        transport_params=None,
        ):
    r"""Open the URI object, returning a file-like object.

    The URI is usually a string in a variety of formats.
    For a full list of examples, see the :func:`parse_uri` function.

    The URI may also be one of:

    - an instance of the pathlib.Path class
    - a stream (anything that implements io.IOBase-like functionality)

    Parameters
    ----------
    uri: str or object
        The object to open.
    mode: str, optional
        Mimicks built-in open parameter of the same name.
    buffering: int, optional
        Mimicks built-in open parameter of the same name.
    encoding: str, optional
        Mimicks built-in open parameter of the same name.
    errors: str, optional
        Mimicks built-in open parameter of the same name.
    newline: str, optional
        Mimicks built-in open parameter of the same name.
    closefd: boolean, optional
        Mimicks built-in open parameter of the same name.  Ignored.
    opener: object, optional
        Mimicks built-in open parameter of the same name.  Ignored.
    ignore_ext: boolean, optional
        Disable transparent compression/decompression based on the file extension.
    transport_params: dict, optional
        Additional parameters for the transport layer (see notes below).

    Returns
    -------
    A file-like object.

    Notes
    -----
    smart_open has several implementations for its transport layer (e.g. S3, HTTP).
    Each transport layer has a different set of keyword arguments for overriding
    default behavior.  If you specify a keyword argument that is *not* supported
    by the transport layer being used, smart_open will ignore that argument and
    log a warning message.

    smart_open/doctools.py magic goes here

    See Also
    --------
    - `Standard library reference <https://docs.python.org/3.7/library/functions.html#open>`__
    - `smart_open README.rst
      <https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst>`__

    """
    logger.debug('%r', locals())

    if not isinstance(mode, str):
        raise TypeError('mode should be a string')

    if transport_params is None:
        transport_params = {}

    # Fast path: plain local files with no compression/decompression go
    # straight to the built-in open().
    fobj = _shortcut_open(
        uri,
        mode,
        ignore_ext=ignore_ext,
        buffering=buffering,
        encoding=encoding,
        errors=errors,
    )
    if fobj is not None:
        return fobj

    #
    # This is a work-around for the problem described in Issue #144.
    # If the user has explicitly specified an encoding, then assume they want
    # us to open the destination in text mode, instead of the default binary.
    #
    # If we change the default mode to be text, and match the normal behavior
    # of Py2 and 3, then the above assumption will be unnecessary.
    #
    if encoding is not None and 'b' in mode:
        mode = mode.replace('b', '')

    if isinstance(uri, pathlib.Path):
        uri = str(uri)

    explicit_encoding = encoding
    encoding = explicit_encoding if explicit_encoding else SYSTEM_ENCODING

    #
    # This is how we get from the filename to the end result.  Decompression is
    # optional, but it always accepts bytes and returns bytes.
    #
    # Decoding is also optional, accepts bytes and returns text.  The diagram
    # below is for reading, for writing, the flow is from right to left, but
    # the code is identical.
    #
    #           open as binary         decompress?          decode?
    # filename ---------------> bytes -------------> bytes ---------> text
    #                          binary             decompressed       decode
    #
    binary_mode = _TO_BINARY_LUT.get(mode, mode)
    binary = _open_binary_stream(uri, binary_mode, transport_params)
    if ignore_ext:
        decompressed = binary
    else:
        decompressed = compression.compression_wrapper(binary, mode)

    # Wrap with a codec only for text mode, or when an encoding was given
    # explicitly (see the Issue #144 note above).
    if 'b' not in mode or explicit_encoding is not None:
        decoded = _encoding_wrapper(decompressed, mode, encoding=encoding, errors=errors)
    else:
        decoded = decompressed

    return decoded
# README anchor explaining how to migrate from smart_open() to open();
# referenced in deprecation warnings and docstrings below.
_MIGRATION_NOTES_URL = (
    'https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst'
    '#migrating-to-the-new-open-function'
)
def smart_open(uri, mode="rb", **kw):
"""Deprecated, use smart_open.open instead.
See the migration instructions: %s
""" % _MIGRATION_NOTES_URL
warnings.warn(
'This function is deprecated, use smart_open.open instead. '
'See the migration notes for details: %s' % _MIGRATION_NOTES_URL
)
#
# The new function uses a shorter name for this parameter, handle it separately.
#
ignore_extension = kw.pop('ignore_extension', False)
expected_kwargs = utils.inspect_kwargs(open)
scrubbed_kwargs = {}
transport_params = {}
#
# Handle renamed keyword arguments. This is required to maintain backward
# compatibility. See test_smart_open_old.py for tests.
#
if 'host' in kw or 's3_upload' in kw:
transport_params['multipart_upload_kwargs'] = {}
transport_params['resource_kwargs'] = {}
if 'host' in kw:
url = kw.pop('host')
if not url.startswith('http'):
url = 'http://' + url
transport_params['resource_kwargs'].update(endpoint_url=url)
if 's3_upload' in kw and kw['s3_upload']:
transport_params['multipart_upload_kwargs'].update(**kw.pop('s3_upload'))
#
# Providing the entire Session object as opposed to just the profile n |
biogeo/tinbergen | tinbergen.py | Python | mit | 24,413 | 0.004956 | #!/bin/python
"""
Main user interface and controller for Tinbergen.
"""
import sys
import os
import gobject
import gtk
import gst
import tbdatamodel
import string
#import math
# Sentinel timestamp meaning "no time recorded" for an observation.
NO_TIME = float('nan')
# Locate this script's directory so the Glade UI file can be found next to it;
# sys.argv[0] is used when run as a script, __file__ when imported.
if __name__ == '__main__':
    # Get the path to this script using sys.argv[0]
    script_dir = os.path.dirname(sys.argv[0])
else:
    # Get the path to this script using __file__
    script_dir = os.path.dirname(__file__)
# Glade UI definition for the main window, shipped alongside this module.
mainwin_gladefile = os.path.join(script_dir, 'tb_mainwin.glade')
class MainUI:
"""
A class to open a window for coding a Tinbergen project.
"""
key_dispatch = {'new obs': gtk.gdk.keyval_from_name('Tab'),
'step forward': gtk.gdk.keyval_from_name('Right'),
'step back': gtk.gdk.keyval_from_name('Left'),
'play/pause': gtk.gdk.keyval_from_name('space'),
'speed x2': gtk.gdk.keyval_from_name('backslash'),
'speed x1': gtk.gdk.keyval_from_name('br | acketright'),
'speed x.5': gtk.gdk.keyval_from_name('bracketleft')}
hotkey_list = [gtk.gdk.keyval_from_name(c) for c in string.ascii_letters+string.digits]
def __init__(self, project):
self.project = project
self._cur_observer = None
self._cur_video = None
self._cur_video_rate = 1.0 # Ought to be able to do this by querying
# the player, but I can't figure out how
self.current_modif | ied = False
# Load UI from Glade file:
builder = gtk.Builder()
builder.add_from_file(mainwin_gladefile)
# Get references to relevant objects as attributes of self:
ui_objects = ['main_win','observer_combo','file_nav','behavior_nav',
'video_area', 'play_button', 'time_scale']
for item in ui_objects:
setattr(self, item, builder.get_object(item))
self.time_scale.set_digits(3)
# Connect signals from UI to methods of self:
builder.connect_signals(self)
self.behavior_entry_cell = gtk.CellRendererText()
self.configure_observer_combo()
self.configure_file_nav()
self.configure_behavior_nav()
self.player = gst.element_factory_make('playbin2')
bus = self.player.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect('message::eos', self.on_video_end)
bus.connect('message::state-changed', self.on_player_state_change)
bus.connect('sync-message::element', self.on_attach_video_window)
self.time_update_handle = None
self.current_framerate = None
self.main_win.show()
def get_current_observer(self):
"Returns the current observer."
return self._cur_observer
    def set_current_observer(self, new):
        """
        Sets the current observer. If new is not a valid observer code for the
        project, sets to no current observer.
        """
        # No-op when nothing changes; avoids re-saving/re-opening observations.
        if new == self._cur_observer:
            return
        # We are about to close the observations, so save them first.
        self.save_current_obs()
        observer_codes = [obs['code'] for obs in self.project.observers]
        if new not in observer_codes:
            new = None
        self._cur_observer = new
        # Update the Observer combobox to reflect the change.
        combo_active = self.observer_combo.get_active()
        combo_model = self.observer_combo.get_model()
        combo_items = [row[0] for row in combo_model]
        # NOTE(review): index 0 is treated as the "no observer" row of the
        # combobox (see set_active(0) below) -- confirm against the Glade
        # model definition.
        if combo_active < 1:
            combo_current = None
        else:
            combo_current = combo_items[combo_active]
        if new != combo_current:
            # Only change the combobox if necessary, to avoid looping
            if new is None:
                self.observer_combo.set_active(0)
            else:
                new_active = combo_items.index(new)
                self.observer_combo.set_active(new_active)
        # Now that the observer has changed, open observations again.
        self.open_observations()
def get_current_video(self):
"Returns the current video file."
return self._cur_video
    def set_current_video(self, new):
        """
        Set the current video file.
        """
        # No-op when the same video is re-selected.
        if new == self._cur_video:
            return
        # Unknown file names are coerced to "no video".
        if new not in self.project.video_files:
            new = None
        # Close the video
        self.player.set_state(gst.STATE_NULL)
        # We're about to close the current observations, so save first
        self.save_current_obs()
        self._cur_video = new
        # If the selected video is not the new video, update the selection
        nav_selection = self.file_nav.get_selection()
        nav_model, nav_iter = nav_selection.get_selected()
        if nav_iter is None:
            nav_current = None
        else:
            nav_current = nav_model.get_value(nav_iter, 0)
        if new != nav_current:
            if new is None:
                nav_selection.unselect_all()
            else:
                # Linear scan for the row whose first column is the file name.
                for ind in xrange(len(nav_model)):
                    if new == nav_model[ind][0]:
                        nav_selection.select_path(ind)
                        break
        if new is not None:
            # Open the new video
            video_path = self.project.join_video_path(new)
            self.player.set_property('uri', 'file://' + video_path)
            self.player.set_state(gst.STATE_PAUSED)
        self.open_observations()
    def get_current_time(self):
        """
        For the currently open video, get the current time, in seconds. If there
        is no video open, return 0.
        """
        try:
            nanosecs, format = self.player.query_position(gst.FORMAT_TIME)
            #return float(nanosecs) / gst.SECOND
            #return math.floor(float(nanosecs) / gst.SECOND
            #    * self.current_framerate) / self.current_framerate
            # Truncate the position to a whole number of frames before
            # converting back to seconds, so times snap to frame boundaries.
            # NOTE(review): assumes current_framerate is set (non-None,
            # non-zero) whenever a video is loaded -- confirm where
            # current_framerate is assigned before relying on this.
            return (float(nanosecs / int(gst.SECOND / self.current_framerate))
                    / self.current_framerate)
        except gst.QueryError:
            return 0
def set_current_time(self, time):
"""
Seek to a given time in the video.
"""
if time < 0:
time = 0.0
self.player.seek(self.get_video_rate(), gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
gst.SEEK_TYPE_SET, int(time * gst.SECOND),
gst.SEEK_TYPE_NONE, 0)
def get_video_duration(self):
"""
Get the duration of the current video, in seconds.
"""
try:
nanosecs, format = self.player.query_duration(gst.FORMAT_TIME)
return float(nanosecs) / gst.SECOND
except gst.QueryError:
return 0
def get_video_rate(self):
"Get the current relative playback rate. 1.0 is normal speed."
return self._cur_video_rate
    def set_video_rate(self, rate):
        "Set the current relative playback rate. 1.0 normal speed."
        try:
            # A rate change is applied as a seek to the current position with
            # the new rate; querying the position can fail if no video is
            # loaded, in which case the rate is left untouched.
            nanosecs, format = self.player.query_position(gst.FORMAT_TIME)
            self.player.seek(rate, gst.FORMAT_TIME,
                             gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
                             gst.SEEK_TYPE_SET, nanosecs,
                             gst.SEEK_TYPE_NONE, 0)
            # Only record the new rate once the seek has been issued.
            self._cur_video_rate = rate
        except gst.QueryError:
            return
def can_edit_observations(self):
"Returns True if the observations are currently editable."
return self._cur_observer is not None and self._cur_video is not None
def is_video_playing(self):
status,state,pending = self.player.get_state(0)
return state == gst.STATE_PLAYING
def is_video_loaded(self):
status,state,pending = self.player.get_state(0)
return state in [gst.STATE_PAUSED, gst.STATE_PLAYING]
#------- CONFIGURE OBJECTS -------
def configure_observer_combo(self):
# Create model and cell renderers for the observer combobox
model = gtk.ListStore |
croscon/fleaker | tests/marshmallow/test_extension.py | Python | bsd-3-clause | 1,644 | 0.001217 | # ~*~ coding: utf-8 ~*~
"""
tests.marshmallow.test_extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the :class:`MarshmallowAwareApp` to ensure that it will properly
register the extension and can be used, as well as testing the top level
schema.
"""
import pytest
from flask_marshmallow import fields
from fleaker import Schema
from fleaker.marshmallow import MarshmallowAwareApp, marsh
SERVER_NAME = 'localhost'
def _create_app():
    """Build a MarshmallowAwareApp configured for URL-generation tests.

    The view function is deliberately named ``test``: Flask derives the
    endpoint name from the function name, and the schemas in this module
    resolve it via ``UrlFor('test')``. SERVER_NAME is set so external URLs
    can be generated inside a bare app context.
    (Repairs a garbled function name: ``def _crea | te_app``.)
    """
    app = MarshmallowAwareApp.create_app('tests.marshmallow')
    app.config['SERVER_NAME'] = SERVER_NAME

    @app.route('/test')
    def test():
        """Test route for Flask URL generation."""
        return b'test'

    return app
def test_marshmallow_extension_creation():
    """Ensure creating the MM Aware app registers the extension.

    (Repairs a garbled function name: ``def test_ | marshmallow...``.)
    """
    app = _create_app()
    # The Flask-Marshmallow instance must be registered under its canonical
    # extension key and be the shared module-level object.
    assert 'flask-marshmallow' in app.extensions
    assert app.extensions['flask-marshmallow'] is marsh
def test_marshmallow_extension_url_for():
    """Ensure that the UrlFor field with Flask-Marshmallow works."""
    flask_app = _create_app()

    class LinkSchema(Schema):
        """Schema exposing relative and absolute links to the test route."""
        link = fields.UrlFor('test', _external=False)
        ext_link = fields.UrlFor('test', _scheme='https', _external=True)

    serializer = LinkSchema()

    # URL generation requires an application context; outside of one the
    # dump must raise.
    with pytest.raises(RuntimeError):
        serializer.dump({})

    with flask_app.app_context():
        data = serializer.dump({}).data
        assert data['link'] == '/test'
        assert data['ext_link'] == 'https://{}/test'.format(SERVER_NAME)
|
roadmapper/ansible | lib/ansible/modules/cloud/google/gcp_compute_health_check.py | Python | gpl-3.0 | 47,123 | 0.004159 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
# Standard Ansible module metadata: community-supported module in preview.
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_health_check
description:
- Health Checks determine whether instances are responsive and able to do work.
- They are an important part of a comprehensive load balancing configuration, as they
enable monitoring instances behind load balancers.
- Health Checks poll instances at a specified interval. Instances that do not respond
successfully to some number of probes in a row are marked as unhealthy. No new connections
are sent to unhealthy instances, though existing connections will continue. The
health check will continue to poll unhealthy instances. If an instance later responds
successfully to some number of consecutive probes, it is marked healthy again and
can receive new connections.
short_description: Creates a GCP HealthCheck
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
check_interval_sec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
required: false
default: '5'
type: int
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
required: false
default: '2'
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
    required: true
type: str
timeout_sec:
description:
- How long (in seconds) to wait before claiming failure.
    - The default value is 5 seconds. It is invalid for timeoutSec to have greater
value than checkIntervalSec.
required: false
default: '5'
type: int
aliases:
- timeout_seconds
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
required: false
default: '2'
type: int
type:
description:
- Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS. If not
specified, the default is TCP. Exactly one of the protocol-specific health check
field must be specified, which must match type field.
- 'Some valid choices include: "TCP", "SSL", "HTTP", "HTTPS", "HTTP2"'
required: false
type: str
http_health_check:
description:
- A nested object resource.
required: false
type: dict
suboptions:
host:
description:
- The value of the host header in the HTTP health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
required: false
type: str
request_path:
description:
- The request path of the HTTP health check request.
- The default value is /.
required: false
default: "/"
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
required: false
type: str
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
required: false
type: int
port_name:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
required: false
type: str
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
- 'Some valid choices include: "NONE", "PROXY_V1"'
required: false
default: NONE
type: str
port_specification:
description:
- 'Specifies how port is selected for health checking, can be one of the following
values: * `USE_FIXED_PORT`: The port number in `port` is used for health
checking.'
- "* `USE_NAMED_PORT`: The `portName` is used for health checking."
- "* `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for
each network endpoint is used for health checking. For other backends, the
port or named port specified in the Backend Service is used for health checking."
- If not specified, HTTP health check follows behavior specified in `port`
and `portName` fields.
- 'Some valid choices include: "USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"'
required: false
type: str
version_added: '2.9'
https_health_check:
description:
- A nested object resource.
required: false
type: dict
suboptions:
host:
description:
- The value of the host header in the HTTPS health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
required: false
type: str
request_path:
description:
- The request path of the HTTPS health check request.
- The default value is /.
required: false
default: "/"
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left empty
(the default value), any response will indicate health. The response data
can only be ASCII.
required: false
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
- The default value is 443.
required: false
type: int
port_name:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and port_name
are defined, port takes precedence.
required: false
type: str
proxy_header:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
- 'Some valid choices include: "NONE", "PROXY_V1"'
required: false
default: NONE
|
# MMDetection config: Cascade Mask R-CNN with ResNet-101 FPN backbone and
# Seesaw loss, multi-scale 2x training schedule, on LVIS v1 (1203 classes).
# Repairs two garbled splices in the original text (beta=1.0, to_rgb=True).
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')),
    roi_head=dict(
        # Three cascade stages with progressively tighter box-regression
        # target standard deviations.
        # NOTE(review): 'tempearture' is the (misspelled) keyword actually
        # accepted by MMDetection's NormedLinear -- do not "fix" the spelling.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=1203,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
                loss_cls=dict(
                    type='SeesawLoss',
                    p=0.8,
                    q=2.0,
                    num_classes=1203,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Multi-scale training pipeline: short side fixed at 1333, long side sampled
# from 640..800 in steps of 32.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_train.json',
        img_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root,
        pipeline=test_pipeline))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
|
# Physical constants and element tables used across yambopy.
# Repairs two garbled splices in the original table text.
# Imaginary unit, kept as a named constant for readability in formulas.
I = complex(0, 1)
# Hartree -> electron-volt conversion factor.
ha2ev = 27.211396132
# Electron-volt -> wavenumber (cm^-1) conversion factor.
ev2cm1 = 8065.5440044136285
# Bohr radius -> Angstrom conversion factor.
bohr2ang = 0.52917720859
# Standard atomic masses indexed by atomic number (index 0 is unused padding;
# None marks elements with no standard value in the source table).
atomic_mass = [ None, 1.00794, 4.002602, 6.941, 9.012182,
                10.811, 12.0107, 14.0067, 15.9994, 18.9984032,
                20.1797, 22.98976928, 24.305,26.9815386, 28.0855,
                30.973762, 32.065, 35.453, 39.948, 39.0983,
                40.078, 44.955912, 47.867, 50.9415, 51.9961,
                54.938045, 55.845, 58.933195, 58.6934, 63.546,
                65.38, 69.723, 72.64, 74.9216, 78.96,
                79.904, 83.798, 85.4678, 87.62, 88.90585,
                91.224, 92.90638, 95.96, None, 101.07,
                102.9055, 106.42, 107.8682, 112.411, 114.818,
                118.71, 121.76, 127.6, 126.90447, 131.293,
                132.9054519, 137.327, 138.90547, 140.116, 140.90765,
                144.242, None, 150.36, 151.964, 157.25,
                158.92535, 162.5, 164.93032, 167.259, 168.93421,
                173.054, 174.9668, 178.49, 180.94788, 183.84,
                186.207, 190.23, 192.217, 195.084, 196.966569,
                200.59, 204.3833, 207.2, 208.9804, None,
                None, None, None, None, None,
                232.03806, 231.03588, 238.02891, None, None,
                None, None, None, None, None,
                None, None, None, None, None,
                None, None, None, None, None,
                None, None, None, None, None,
                None, None, None, None]
# Chemical symbols indexed by atomic number; index 0 holds the dummy 'X'.
# NOTE(review): this table stops at Lr (103) while atomic_mass has 119
# entries -- indices 104..118 have a mass slot but no symbol.
chemical_symbols = ['X', 'H', 'He', 'Li', 'Be',
                    'B', 'C', 'N', 'O', 'F',
                    'Ne', 'Na', 'Mg', 'Al', 'Si',
                    'P', 'S', 'Cl', 'Ar', 'K',
                    'Ca', 'Sc', 'Ti', 'V', 'Cr',
                    'Mn', 'Fe', 'Co', 'Ni', 'Cu',
                    'Zn', 'Ga', 'Ge', 'As', 'Se',
                    'Br', 'Kr', 'Rb', 'Sr', 'Y',
                    'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
                    'Rh', 'Pd', 'Ag', 'Cd', 'In',
                    'Sn', 'Sb', 'Te', 'I', 'Xe',
                    'Cs', 'Ba', 'La', 'Ce', 'Pr',
                    'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
                    'Tb', 'Dy', 'Ho', 'Er', 'Tm',
                    'Yb', 'Lu', 'Hf', 'Ta', 'W',
                    'Re', 'Os', 'Ir', 'Pt', 'Au',
                    'Hg', 'Tl', 'Pb', 'Bi', 'Po',
                    'At', 'Rn', 'Fr', 'Ra', 'Ac',
                    'Th', 'Pa', 'U', 'Np', 'Pu',
                    'Am', 'Cm', 'Bk', 'Cf', 'Es',
                    'Fm', 'Md', 'No', 'Lr']
|
tritoanst/ccxt | python/ccxt/bitstamp1.py | Python | mit | 10,277 | 0.001946 | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
class bitstamp1 (Exchange):
    def describe(self):
        """Return the static description of the Bitstamp v1 exchange.

        Merges this exchange's id, endpoints, required credentials, API
        routes and hard-coded market list into the base Exchange
        description via deep_extend.
        """
        return self.deep_extend(super(bitstamp1, self).describe(), {
            'id': 'bitstamp1',
            'name': 'Bitstamp v1',
            'countries': 'GB',
            'rateLimit': 1000,
            'version': 'v1',
            'hasCORS': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27786377-8c8ab57e-5fe9-11e7-8ea4-2b05b6bcceec.jpg',
                'api': 'https://www.bitstamp.net/api',
                'www': 'https://www.bitstamp.net',
                'doc': 'https://www.bitstamp.net/api',
            },
            # v1 private calls require a customer id ("uid") besides key/secret.
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'api': {
                'public': {
                    'get': [
                        'ticker',
                        'ticker_hour',
                        'order_book',
                        'transactions',
                        'eur_usd',
                    ],
                },
                'private': {
                    'post': [
                        'balance',
                        'user_transactions',
                        'open_orders',
                        'order_status',
                        'cancel_order',
                        'cancel_all_orders',
                        'buy',
                        'sell',
                        'bitcoin_deposit_address',
                        'unconfirmed_btc',
                        'ripple_withdrawal',
                        'ripple_address',
                        'withdrawal_requests',
                        'bitcoin_withdrawal',
                    ],
                },
            },
            # Markets are hard-coded because the v1 API has no market-list
            # endpoint; all pairs use a flat 0.25% maker/taker fee.
            'markets': {
                'BTC/USD': {'id': 'btcusd', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'BTC/EUR': {'id': 'btceur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'EUR/USD': {'id': 'eurusd', 'symbol': 'EUR/USD', 'base': 'EUR', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'XRP/USD': {'id': 'xrpusd', 'symbol': 'XRP/USD', 'base': 'XRP', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'XRP/EUR': {'id': 'xrpeur', 'symbol': 'XRP/EUR', 'base': 'XRP', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'XRP/BTC': {'id': 'xrpbtc', 'symbol': 'XRP/BTC', 'base': 'XRP', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
                'LTC/USD': {'id': 'ltcusd', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'LTC/EUR': {'id': 'ltceur', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
                'ETH/USD': {'id': 'ethusd', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'maker': 0.0025, 'taker': 0.0025},
                'ETH/EUR': {'id': 'etheur', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'maker': 0.0025, 'taker': 0.0025},
                'ETH/BTC': {'id': 'ethbtc', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'maker': 0.0025, 'taker': 0.0025},
            },
        })
def fetch_order_book(self, symbol, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchOrderBook doesn't support " + symbol + ', use it for BTC/USD only')
orderbook = self.pu | blicGetOrderBook(params)
timestamp = int(orderbook['timestamp']) * 1000
return self.parse_order_book(orderbook, timestamp)
def fetch_ticker(self, symbol, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTicker doesn | 't support " + symbol + ', use it for BTC/USD only')
ticker = self.publicGetTicker(params)
timestamp = int(ticker['timestamp']) * 1000
vwap = float(ticker['vwap'])
baseVolume = float(ticker['volume'])
quoteVolume = baseVolume * vwap
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': vwap,
'open': float(ticker['open']),
'close': None,
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = None
if 'date' in trade:
timestamp = int(trade['date']) * 1000
elif 'datetime' in trade:
# timestamp = self.parse8601(trade['datetime'])
timestamp = int(trade['datetime']) * 1000
side = 'buy' if (trade['type'] == 0) else 'sell'
order = None
if 'order_id' in trade:
order = str(trade['order_id'])
if 'currency_pair' in trade:
if trade['currency_pair'] in self.markets_by_id:
market = self.markets_by_id[trade['currency_pair']]
return {
'id': str(trade['tid']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': order,
'type': None,
'side': side,
'price': float(trade['price']),
'amount': float(trade['amount']),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' ' + self.version + " fetchTrades doesn't support " + symbol + ', use it for BTC/USD only')
market = self.market(symbol)
response = self.publicGetTransactions(self.extend({
'time': 'minute',
}, params))
return self.parse_trades(response, market, since, limit)
def fetch_balance(self, params={}):
balance = self.privatePostBalance()
result = {'info': balance}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercase = currency.lower()
total = lowercase + '_balance'
free = lowercase + '_available'
used = lowercase + '_reserved'
account = self.account()
account['free'] = self.safe_float(balance, free, 0.0)
account['used'] = self.safe_float(balance, used, 0.0)
account['total'] = self.safe_float(balance, total, 0.0)
result[currency] = account
return self.parse_balance(result)
def create_order(self, symbol, type, side, amount, price=None, params={}):
if type != 'limit':
raise ExchangeError(self.id + ' ' + self.version + ' accepts limit orders only')
if symbol != 'BTC/USD':
raise ExchangeError(self.id + ' v1 supports BTC/USD orders only')
method = 'privatePost' + self.capitalize(side)
order = {
'amount': amount,
'price': price,
}
response = getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
def cancel_order(self, id, symbol=None, params={}):
return self.privatePostCancelOrder({'id': id})
def parse_order_status(self, order):
if (order['status'] == 'Queue') or (order['status'] == 'Open'):
return 'open'
if order['status'] == 'Finished':
return 'closed'
|
addition-it-solutions/project-all | addons/account_check_writing/report/check_print.py | Python | agpl-3.0 | 2,876 | 0.006259 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_print_check(report_sxw.rml_parse):
    """RML parser for the check-printing report.

    Exposes fill_stars and get_lines to the report template via
    localcontext.
    """
    def __init__(self, cr, uid, name, context):
        super(report_print_check, self).__init__(cr, uid, name, context)
        self.number_lines = 0
        self.number_add = 0
        self.localcontext.update({
            'time': time,
            'get_lines': self.get_lines,
            'fill_stars' : self.fill_stars,
        })
    def fill_stars(self, amount):
        # Pad the written amount with trailing asterisks up to ~100 chars so
        # the amount line cannot be altered after printing.
        if len(amount) < 100:
            stars = 100 - len(amount)
            return ' '.join([amount,'*'*stars])
        else: return amount
    def get_lines(self, voucher_lines):
        """Build up to 10 row dicts for the check counterfoil."""
        result = []
        self.number_lines = len(voucher_lines)
        # NOTE(review): because i ranges over min(10, number_lines), the
        # condition below is always true and the empty-row padding branch is
        # unreachable; if the intent was to pad the counterfoil to 10 rows,
        # the loop should run over range(0, 10) -- confirm against the report
        # template before changing. Also note the padding dict uses the key
        # 'amount_due' where the live dict uses 'amount_unreconciled'.
        for i in range(0, min(10,self.number_lines)):
            if i < self.number_lines:
                res = {
                    'date_due' : voucher_lines[i].date_due,
                    'name' : voucher_lines[i].name,
                    'amount_original' : voucher_lines[i].amount_original and voucher_lines[i].amount_original or False,
                    'amount_unreconciled' : voucher_lines[i].amount_unreconciled and voucher_lines[i].amount_unreconciled or False,
                    'amount' : voucher_lines[i].amount and voucher_lines[i].amount or False,
                }
            else :
                res = {
                    'date_due' : False,
                    'name' : False,
                    'amount_original' : False,
                    'amount_due' : False,
                    'amount' : False,
                }
            result.append(res)
        return result
class report_check(osv.AbstractModel):
    """QWeb report model that wraps report_print_check as its parser."""
    # Report service name; must match the report's registered id.
    _name = 'report.account_check_writing.report_check'
    _inherit = 'report.abstract_report'
    # QWeb template rendered by this report.
    _template = 'account_check_writing.report_check'
    # Parser class whose localcontext helpers the template uses.
    _wrapped_report_class = report_print_check
|
caseydavenport/calico-docker | tests/st/policy/test_profile.py | Python | apache-2.0 | 15,579 | 0.000514 | # Copyright 2015 Tigera, In | c
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import netaddr
import yaml
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.docker_host import DockerHost, CLUSTER_STORE_DOCKER_OPTIONS
from tests.st.utils.exceptions import CommandExecError
from tests.st.utils.utils import assert_network, assert_profile, \
assert_number_endpoints, get_profile_name
# Commands run on each test host after Docker starts: pre-load the images
# the tests need so no registry access is required during the run.
POST_DOCKER_COMMANDS = ["docker load -i /code/calico-node.tar",
                        "docker load -i /code/busybox.tar",
                        "docker load -i /code/workload.tar"]
class MultiHostMainline(TestBase):
@parameterized.expand([
#"tags",
"rules.tags",
#"rules.protocol.icmp",
#"rules.ip.addr",
#"rules.ip.net",
#"rules.selector",
#"rules.tcp.port",
#"rules.udp.port",
])
def test_multi_host(self, test_type):
"""
Run a mainline multi-host test.
Because multihost tests are slow to setup, this tests most mainline
functionality in a single test.
- Create two hosts
- Create a network using the default IPAM driver, and a workload on
each host assigned to that network.
- Create a network using the Calico IPAM driver, and a workload on
each host assigned to that network.
- Check that hosts on the same network can ping each other.
- Check that hosts on different networks cannot ping each other.
- Modify the profile rules
- Check that connectivity has changed to match the profile we set up
- Re-apply the original profile
- Check that connectivity goes back to what it was originally.
"""
with DockerHost("host1",
additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
post_docker_commands=POST_DOCKER_COMMANDS,
start_calico=False) as host1, \
DockerHost("host2",
additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,
post_docker_commands=POST_DOCKER_COMMANDS,
start_calico=False) as host2:
(n1_workloads, n2_workloads, networks) = \
self._setup_workloads(host1, host2)
# Get the original profiles:
output = host1.calicoctl("get profile -o yaml")
original_profiles = yaml.safe_load(output)
# Make a copy of the profiles to mess about with.
new_profiles = copy.deepcopy(original_profiles)
if test_type == "tags":
profile0_tag = new_profiles[0]['metadata']['tags'][0]
profile1_tag = new_profiles[1]['metadata']['tags'][0]
# Make a new profiles dict where the two networks have each
# other in their tags list
new_profiles[0]['metadata']['tags'].append(profile1_tag)
new_profiles[1]['metadata']['tags'].append(profile0_tag)
self._apply_new_profile(new_profiles, host1)
# Check everything can contact everything else now
self.assert_connectivity(retries=2,
pass_list=n1_workloads + n2_workloads)
elif test_type == "rules.tags":
profile0_tag = new_profiles[0]['metadata']['tags'][0]
profile1_tag = new_profiles[1]['metadata']['tags'][0]
rule0 = {'action': 'allow',
'source':
{'tag': profile1_tag}}
rule1 = {'action': 'allow',
'source':
{'tag': profile0_tag}}
new_profiles[0]['spec']['ingress'].append(rule0)
new_profiles[1]['spec']['ingress'].append(rule1)
self._apply_new_profile(new_profiles, host1)
# Check everything can contact everything else now
self.assert_connectivity(retries=3,
pass_list=n1_workloads + n2_workloads)
elif test_type == "rules.protocol.icmp":
rule = {'action': 'allow',
'source':
{'protocol': 'icmp'}}
# The copy.deepcopy(rule) is needed to ensure that we don't
# end up with a yaml document with a reference to the same
# rule. While this is probably legal, it isn't main line.
new_profiles[0]['spec']['ingress'].append(rule)
new_profiles[1]['spec']['ingress'].append(copy.deepcopy(rule))
self._apply_new_profile(new_profiles, host1)
# Check everything can contact everything else now
self.assert_connectivity(retries=2,
pass_list=n1_workloads + n2_workloads)
elif test_type == "rules.ip.addr":
prof_n1, prof_n2 = self._get_profiles(new_profiles)
for workload in n1_workloads:
ip = workload.ip
rule = {'action': 'allow',
'source':
{'net': '%s/32' % ip}}
prof_n2['spec']['ingress'].append(rule)
for workload in n2_workloads:
ip = workload.ip
rule = {'action': 'allow',
'source':
{'net': '%s/32' % ip}}
prof_n1['spec']['ingress'].append(rule)
self._apply_new_profile(new_profiles, host1)
self.assert_connectivity(retries=2,
pass_list=n1_workloads + n2_workloads)
elif test_type == "rules.ip.net":
prof_n1, prof_n2 = self._get_profiles(new_profiles)
n1_ips = [workload.ip for workload in n1_workloads]
n2_ips = [workload.ip for workload in n2_workloads]
n1_subnet = netaddr.spanning_cidr(n1_ips)
n2_subnet = netaddr.spanning_cidr(n2_ips)
rule = {'action': 'allow',
'source':
{'net': str(n1_subnet)}}
prof_n2['spec']['ingress'].append(rule)
rule = {'action': 'allow',
'source':
{'net': str(n2_subnet)}}
prof_n1['spec']['ingress'].append(rule)
self._apply_new_profile(new_profiles, host1)
self.assert_connectivity(retries=2,
pass_list=n1_workloads + n2_workloads)
elif test_type == "rules.selector":
new_profiles[0]['metadata']['labels'] = {'net': 'n1'}
new_profiles[1]['metadata']['labels'] = {'net': 'n2'}
rule = {'action': 'allow',
'source':
{'selector': 'net=="n2"'}}
new_profiles[0]['spec']['ingress'].append(rule)
rule = {'action': 'allow',
'source':
{'selector': "net=='n1'"}}
new_profiles[1]['spec']['ingress'].append(rule)
self._apply_new_profile(new_profiles, host1)
self.assert_connectivity(retries=2,
pass_list=n1_workloads + n2_workloads)
elif test_type == "rules.tcp.port":
rule = {'action': 'allow',
|
QubesOS/qubes-core-admin | qubes/tests/vm/appvm.py | Python | lgpl-2.1 | 7,275 | 0.001925 | # -*- encoding: utf-8 -*-
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2017 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
from unittest import mock
import lxml.etree
import qubes.storage
import qubes.tests
import qubes.tests.vm.qubesvm
import qubes.vm.ap | pvm
import qubes.vm.templatevm
class TestApp(object):
    """Minimal stand-in for the qubes application object used by these tests."""
    # Single red label; tests only need one entry in the registry.
    labels = {1: qubes.Label(1, '0xcc0000', 'red')}

    def __init__(self):
        # Maps both VM names and VM objects to VM instances (mirrors how the
        # tests below populate it).
        self.domains = {}
class TestProp(object):
    """Dummy property descriptor stub exposing only a ``__name__``."""
    # pylint: disable=too-few-public-methods
    __name__ = 'testprop'
class TestVM(object):
    """Lightweight fake VM: carries arbitrary attributes and a power state."""
    # pylint: disable=too-few-public-methods

    # Shared stub application object, like real qubes VM instances have.
    app = TestApp()

    def __init__(self, **kwargs):
        self.running = False
        self.installed_by_rpm = False
        # Promote every keyword argument to an instance attribute; plain
        # attribute storage, so a dict update is equivalent to setattr calls.
        self.__dict__.update(kwargs)

    def is_running(self):
        """Report the stubbed power state set by the test."""
        return self.running
class TestVolume(qubes.storage.Volume):
    # Volume stub: creation is a no-op because tests never materialize data.
    def create(self):
        pass
class TestPool(qubes.storage.Pool):
    """In-memory storage pool keeping TestVolume objects in a dict."""
    def __init__(self, *args, **kwargs):
        super(TestPool, self).__init__(*args, **kwargs)
        # vid -> TestVolume, populated by init_volume().
        self._volumes = {}
    def init_volume(self, vm, volume_config):
        # vid follows the '<vm name>/<volume name>' convention used elsewhere.
        vid = '{}/{}'.format(vm.name, volume_config['name'])
        # The caller must have routed this config to this pool; pop the key
        # so it is not forwarded to the Volume constructor twice.
        assert volume_config.pop('pool', None) == self
        vol = TestVolume(vid=vid, pool=self, **volume_config)
        self._volumes[vid] = vol
        return vol
    def get_volume(self, vid):
        # Raises KeyError for unknown vids, like a real pool lookup failure.
        return self._volumes[vid]
class TC_90_AppVM(qubes.tests.vm.qubesvm.QubesVMTestsMixin,
        qubes.tests.QubesTestCase):
    """Tests for AppVM volume wiring, template changes and XML loading."""
    def setUp(self):
        super().setUp()
        self.app.pools['default'] = TestPool(name='default')
        # The kernel pool is only consulted for init_volume(); a Mock whose
        # produced volume reports pool 'linux-kernel' is sufficient.
        self.app.pools['linux-kernel'] = mock.Mock(**{
            'init_volume.return_value.pool': 'linux-kernel'})
        self.template = qubes.vm.templatevm.TemplateVM(self.app, None,
            qid=1, name=qubes.tests.VMPREFIX + 'template')
        # Register under both name and object key, as TestApp.domains expects.
        self.app.domains[self.template.name] = self.template
        self.app.domains[self.template] = self.template
        self.addCleanup(self.cleanup_appvm)
    def cleanup_appvm(self):
        # Tear down the template and reset shared app state between tests.
        self.template.close()
        del self.template
        self.app.domains.clear()
        self.app.pools.clear()
    def get_vm(self, **kwargs):
        # Helper: build an AppVM on self.template; closed automatically.
        vm = qubes.vm.appvm.AppVM(self.app, None,
            qid=2, name=qubes.tests.VMPREFIX + 'test',
            template=self.template,
            **kwargs)
        self.addCleanup(vm.close)
        return vm
    def test_000_init(self):
        # Construction alone must succeed.
        self.get_vm()
    def test_001_storage_init(self):
        # private: persistent; root: snapshot of the template; volatile: scratch.
        vm = self.get_vm()
        self.assertTrue(vm.volume_config['private']['save_on_stop'])
        self.assertFalse(vm.volume_config['private']['snap_on_start'])
        self.assertIsNone(vm.volume_config['private'].get('source', None))
        self.assertFalse(vm.volume_config['root']['save_on_stop'])
        self.assertTrue(vm.volume_config['root']['snap_on_start'])
        self.assertEqual(vm.volume_config['root'].get('source', None),
            self.template.volumes['root'])
        self.assertFalse(
            vm.volume_config['volatile'].get('save_on_stop', False))
        self.assertFalse(
            vm.volume_config['volatile'].get('snap_on_start', False))
        self.assertIsNone(vm.volume_config['volatile'].get('source', None))
    def test_002_storage_template_change(self):
        # Switching template must repoint the root volume source.
        vm = self.get_vm()
        # create new mock, so new template will get different volumes
        self.app.pools['default'] = mock.Mock(**{
            'init_volume.return_value.pool': 'default'})
        template2 = qubes.vm.templatevm.TemplateVM(self.app, None,
            qid=3, name=qubes.tests.VMPREFIX + 'template2')
        self.app.domains[template2.name] = template2
        self.app.domains[template2] = template2
        vm.template = template2
        self.assertFalse(vm.volume_config['root']['save_on_stop'])
        self.assertTrue(vm.volume_config['root']['snap_on_start'])
        self.assertNotEqual(vm.volume_config['root'].get('source', None),
            self.template.volumes['root'].source)
        self.assertEqual(vm.volume_config['root'].get('source', None),
            template2.volumes['root'])
    def test_003_template_change_running(self):
        # Template changes are refused while the VM is (reported) running.
        vm = self.get_vm()
        with mock.patch.object(vm, 'get_power_state') as mock_power:
            mock_power.return_value = 'Running'
            with self.assertRaises(qubes.exc.QubesVMNotHaltedError):
                vm.template = self.template
    def test_004_template_reset(self):
        # AppVM.template has no meaningful default to reset to.
        vm = self.get_vm()
        with self.assertRaises(qubes.exc.QubesValueError):
            vm.template = qubes.property.DEFAULT
    def test_500_property_migrate_template_for_dispvms(self):
        # Legacy 'dispvm_allowed' XML property must migrate to
        # 'template_for_dispvms' and disappear as an attribute.
        xml_template = '''
        <domain class="AppVM" id="domain-1">
            <properties>
                <property name="qid">1</property>
                <property name="name">testvm</property>
                <property name="label" ref="label-1" />
                <property name="dispvm_allowed">{value}</property>
            </properties>
        </domain>
        '''
        xml = lxml.etree.XML(xml_template.format(value='True'))
        vm = qubes.vm.appvm.AppVM(self.app, xml)
        self.assertEqual(vm.template_for_dispvms, True)
        with self.assertRaises(AttributeError):
            vm.dispvm_allowed
        xml = lxml.etree.XML(xml_template.format(value='False'))
        vm = qubes.vm.appvm.AppVM(self.app, xml)
        self.assertEqual(vm.template_for_dispvms, False)
        with self.assertRaises(AttributeError):
            vm.dispvm_allowed
    def test_600_load_volume_config(self):
        # Volume attributes from qubes.xml are loaded verbatim (as strings,
        # except 'rw' which is parsed to bool).
        xml_template = '''
        <domain class="AppVM" id="domain-1">
            <properties>
                <property name="qid">1</property>
                <property name="name">testvm</property>
                <property name="label" ref="label-1" />
            </properties>
            <volume-config>
                <volume name="root" pool="lvm" revisions_to_keep="3" rw="True"
                    size="1234" vid="qubes_dom0/vm-testvm-root" />
            </volume-config>
        </domain>
        '''
        xml = lxml.etree.XML(xml_template)
        vm = qubes.vm.appvm.AppVM(self.app, xml)
        self.assertEqual(vm.volume_config['root']['revisions_to_keep'], '3')
        self.assertEqual(vm.volume_config['root']['rw'], True)
        self.assertEqual(vm.volume_config['root']['size'], '1234')
        self.assertEqual(vm.volume_config['root']['vid'],
            'qubes_dom0/vm-testvm-root')
sgiavasis/nipype | nipype/interfaces/afni/tests/test_auto_MaskTool.py | Python | bsd-3-clause | 1,585 | 0.024606 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..preprocess import MaskTool
def test_MaskTool_inputs():
    # AUTO-GENERATED spec test (see file header): verifies every input trait
    # of MaskTool carries exactly this metadata.  Do not hand-edit values;
    # regenerate with tools/checkspecs.py instead.
    input_map = dict(args=dict(argstr='%s',
    ),
    count=dict(argstr='-count',
    position=2,
    ),
    datum=dict(argstr='-datum %s',
    ),
    dilate_inputs=dict(argstr='-dilate_inputs %s',
    ),
    dilate_results=dict(argstr='-dilate_results %s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    fill_dirs=dict(argstr='-fill_dirs %s',
    requires=['fill_holes'],
    ),
    fill_holes=dict(argstr='-fill_holes',
    ),
    frac=dict(argstr='-frac %s',
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='-input %s',
    copyfile=False,
    mandatory=True,
    position=-1,
    ),
    inter=dict(argstr='-inter',
    ),
    out_file=dict(argstr='-prefix %s',
    name_source='in_file',
    name_template='%s_mask',
    ),
    outputtype=dict(),
    terminal_output=dict(nohash=True,
    ),
    union=dict(argstr='-union',
    ),
    )
    inputs = MaskTool.input_spec()
    # nose-style generator test: one yielded assertion per trait/metadata pair.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MaskTool_outputs():
    # AUTO-GENERATED spec test: MaskTool exposes a single 'out_file' output
    # trait with no extra metadata.
    output_map = dict(out_file=dict(),
    )
    outputs = MaskTool.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
deepak7mahto/ForensicsTool | ForensicsTool/ForensicsTool/Forensics_tool_redesigned_using_oops.py | Python | mit | 14,272 | 0.007077 | import os, colorama, random_functions, module1, module2, module3, module4, module5, module6, module7
class Main1(random_functions.random_functions_class):
    """Top-level interactive menu for the forensics tool suite (Python 2)."""
    def tool_menu_front(self):
        # Render the module menu, dispatch the user's 1-based numeric choice
        # to the matching module entry point, then loop by recursing into
        # itself until the user presses Ctrl-C.
        try:
            colorama.init(autoreset=True)
            # NOTE(review): 'seperator' is a (misspelled) helper inherited
            # from random_functions_class; renaming it would break the API.
            self.seperator()
            self.logo()
            self.seperator()
            print "Select the Module"
            self.seperator()
            print "1. Registry Forensics"
            print "2. Event Manager Forensics"
            print "3. File Hashing"
            print "4. JPEG Forensics"
            print "5. Browser Forensics"
            print "6. Sataganography"
            print "7. Port Scanner"
            self.m1 = module1.Registry_Forensics_module1()
            self.m2 = module2.event_manager_module()
            self.m3 = module3.File_hashing_class()
            self.m4 = module4.Jpeg_forensics_class()
            self.m5 = module5.Browser_forensics_class()
            self.m6 = module6.Staganography_class()
            self.m7 = module7.Port_Scanner_class()
            # Index i in this list corresponds to menu entry i+1 above.
            options_to_be_presented = [self.m1.module1,
                                       self.m2.module2,
                                       self.m3.module3,
                                       self.m4.module4,
                                       self.m5.module5,
                                       self.m6.module6,
                                       self.m7.module7]
            self.seperator()
            # Convert to a 0-based index; a non-numeric entry raises
            # ValueError and falls into the silent handler below.
            choice = int(raw_input("Enter Choice : "))-1
            print colorama.ansi.clear_screen(2)
            self.choice_logic(options_to_be_presented, choice)
            # Recurse to re-display the menu.  NOTE(review): very long
            # sessions could in principle hit the recursion limit; a while
            # loop would be the safer shape -- confirm before changing.
            self.tool_menu_front()
        except KeyboardInterrupt as e:
            # Ctrl-C is the intended exit path ('cls' implies Windows).
            os.system("cls")
            self.seperator()
            print colorama.Fore.GREEN + "Shutting Down"
            self.seperator()
        except Exception as e:
            # NOTE(review): any other error (including bad menu input) is
            # silently swallowed and ends the menu loop -- presumably
            # intentional best-effort behavior, but worth confirming.
            pass
def main():
    # Entry point: build the menu object and hand control to it.
    Forensics_tool_object = Main1()
    Forensics_tool_object.tool_menu_front()
# Only start the interactive menu when run as a script, not on import.
if __name__ == "__main__":
    main()
"""
+-----------------------------------------+
| random_functions.random_functions_class |
+-----------------------------------------+
.
/_\
| [ random_functions.random_functions_class ] [ random_functions.random_functions_class ] [ random_functions.random_functions_class ] [ random_functions.random_functions_class ] [ random_functions.random_functions_class ]
| . . . . | .
| | /_\ /_\ /_\ /_\ /_\
| | | | | |
| | | | | |
+-----------------+ +-------------------------------+ +----------------------------+ +----------------------+ +--------------------+ +----------------------+
| Main1 | | Browser_forensics_class | | Registry_Forensics_module1 | | event_manager_module | | File_hashing_class | | Jpeg_forensics_class |
|-----------------| |-------------------------------| |----------------------------| |----------------------| |--------------------| |----------------------|
| m1 | ----> [ module2.event_manager_module ] | module5 | | module1 | | module2 | | __init__ | | module4 |
| m2 | ----> [ module4.Jpeg_forensics_class ] | chrome_forensics | | usb_module | | Evtx_dumper | | module3 | +----------------------+
| m3 | ----> [ module1.Registry_Forensics_module1 ] | autofill_data_module | | recent_docs_module | | spinning_cursor | | hash_genertion |
| m4 | ----> [ module3.File_hashing_class ] | top_sites_module | | mounted_devices_module | | login_analyzer | | hash_comparison |
| m5 | ----> [ module5.Browser_forensics_class ] | history_module | +----------------------------+ +----------------------+ +--------------------+
|-----------------| | downloads_module |
| tool_menu_front | | keyword_module |
+-----------------+ | generic_data_viewer |
| tables_name_fetch | |
Jelleas/CheckPy | checkpy/assertlib/__init__.py | Python | mit | 38 | 0 | from checkpy.assertlib.basic import | *
| |
mrares/incubator-airflow | tests/utils/log/test_logging.py | Python | apache-2.0 | 4,229 | 0.000473 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from airflow.utils.log.s3_task_handler import S3TaskHandler
class TestS3TaskHandler(unittest.TestCase):
    """Unit tests for S3TaskHandler with a fully mocked S3Hook.

    setUp patches airflow.hooks.S3_hook.S3Hook so no test touches S3; the
    mocked key's contents decode to the literal string 'content'.
    """

    def setUp(self):
        # BUG FIX: the original called super(S3TaskHandler, self).setUp(),
        # naming the class under test instead of this test case.  Since
        # TestS3TaskHandler is not a subclass of S3TaskHandler, that raises
        # "TypeError: super(type, obj)" at runtime.
        super(TestS3TaskHandler, self).setUp()
        self.remote_log_location = 'remote/log/location'
        self.hook_patcher = mock.patch("airflow.hooks.S3_hook.S3Hook")
        self.hook_mock = self.hook_patcher.start()
        self.hook_inst_mock = self.hook_mock.return_value
        self.hook_key_mock = self.hook_inst_mock.get_key.return_value
        self.hook_key_mock.get_contents_as_string.return_value.decode.\
            return_value = 'content'

    def tearDown(self):
        self.hook_patcher.stop()
        # BUG FIX: same wrong-class super() call as in setUp (see above).
        super(TestS3TaskHandler, self).tearDown()

    def test_init(self):
        S3TaskHandler()
        # The handler creates its hook with the default (empty) conn id.
        self.hook_mock.assert_called_once_with('')

    def test_init_raises(self):
        # A failing hook constructor must be logged, not propagated.
        self.hook_mock.side_effect = Exception('Failed to connect')
        handler = S3TaskHandler()
        with mock.patch.object(handler.log, 'error') as mock_error:
            # Initialize the hook
            handler.hook()
            mock_error.assert_called_once_with(
                'Could not create an S3Hook with connection id "". Please make '
                'sure that airflow[s3] is installed and the S3 connection exists.'
            )

    def test_log_exists(self):
        self.assertTrue(S3TaskHandler().log_exists(self.remote_log_location))

    def test_log_exists_none(self):
        # Missing key -> log does not exist.
        self.hook_inst_mock.get_key.return_value = None
        self.assertFalse(S3TaskHandler().log_exists(self.remote_log_location))

    def test_log_exists_raises(self):
        # S3 errors are treated as "log not found".
        self.hook_inst_mock.get_key.side_effect = Exception('error')
        self.assertFalse(S3TaskHandler().log_exists(self.remote_log_location))

    def test_log_exists_no_hook(self):
        self.hook_mock.side_effect = Exception('Failed to connect')
        self.assertFalse(S3TaskHandler().log_exists(self.remote_log_location))

    def test_read(self):
        self.assertEqual(
            S3TaskHandler().read(self.remote_log_location),
            'content'
        )

    def test_read_key_empty(self):
        # Missing key reads as an empty string rather than an error.
        self.hook_inst_mock.get_key.return_value = None
        self.assertEqual(S3TaskHandler().read(self.remote_log_location), '')

    def test_read_raises(self):
        self.hook_inst_mock.get_key.side_effect = Exception('error')
        self.assertEqual(S3TaskHandler().read(self.remote_log_location), '')

    def test_read_raises_return_error(self):
        self.hook_inst_mock.get_key.side_effect = Exception('error')
        handler = S3TaskHandler()
        with mock.patch.object(handler.log, 'error') as mock_error:
            # NOTE(review): this calls s3_log_read while the tests above use
            # read() -- looks like an older method name; confirm against the
            # S3TaskHandler API before renaming.
            result = handler.s3_log_read(
                self.remote_log_location,
                return_error=True
            )
            msg = 'Could not read logs from %s' % self.remote_log_location
            self.assertEqual(result, msg)
            mock_error.assert_called_once_with(msg)

    def test_write(self):
        # Appending to an existing remote log concatenates old and new text.
        S3TaskHandler().write('text', self.remote_log_location)
        self.hook_inst_mock.load_string.assert_called_once_with(
            'content\ntext',
            key=self.remote_log_location,
            replace=True,
            encrypt=False,
        )

    def test_write_raises(self):
        # Upload failures are logged, not raised.
        self.hook_inst_mock.load_string.side_effect = Exception('error')
        handler = S3TaskHandler()
        with mock.patch.object(handler.log, 'error') as mock_error:
            handler.write('text', self.remote_log_location)
            msg = 'Could not write logs to %s' % self.remote_log_location
            mock_error.assert_called_once_with(msg)
immo/pyTOM | df/df_interpreter.py | Python | gpl-3.0 | 1,707 | 0.018161 | # coding: utf-8
#
# drums-backend a simple interactive audio sampler that plays vorbis samples
# Copyright (C) 2009 C.D. Immanuel Albrecht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import code
import string
from df_global import *
class DfInterpreter(code.InteractiveInterpreter):
    """Interpreter that executes python code sent by the user interface.

    Output and tracebacks are forwarded to the UI through the shared
    DfGlobal state under the "ui_out" key, with "PYTHON:"-prefixed lines.
    """
    # Shared global state.  NOTE(review): DfGlobal appears to be a
    # singleton/shared-state class, so this class attribute and the instance
    # attribute set in __init__ presumably refer to the same state -- confirm.
    vars = DfGlobal()
    cached = ""

    def __init__(self, globalvars, locals=None):
        # BUG FIX: the original did ``vars = globalvars``, binding a local
        # name that was immediately discarded, so the caller-supplied global
        # state was never stored.  Keep it on the instance instead.
        self.vars = globalvars
        code.InteractiveInterpreter.__init__(self, locals)
        self.tracebacked = False
        # Wrap showtraceback so a traceback is recorded while still
        # delegating to the original implementation.
        self.oldshowtraceback = self.showtraceback
        def new_traceback(*args):
            self.tracebacked = True
            return self.oldshowtraceback(*args)
        self.showtraceback = new_traceback

    def write(self, data):
        # Prefix each line for the UI wire protocol.  NOTE(review): the
        # continuation prefix "PYTHON':" (with apostrophe) looks like a typo
        # for "PYTHON:", but the protocol is defined by the UI side; keep it
        # byte-identical until confirmed.
        send = "PYTHON:" + data.replace("\n", "\nPYTHON':") + "\n"
        self.vars["ui_out"].write(send)
        self.vars["ui_out"].flush()

    def hasTracebacked(self):
        """Return True once per recorded traceback, then reset the flag."""
        if self.tracebacked:
            self.tracebacked = False
            return True
        else:
            return False
drewUCL/Automation-Station | mysite/mysite/settings.py | Python | mit | 3,404 | 0.003231 | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any public deployment.
SECRET_KEY = ')@2g1*#04r-8xv1+-cezcbw4qb(+=pk@j=toa_!q69@sv(0bc#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# Project apps (personal, webapp, blog) are listed before Django's builtins.
INSTALLED_APPS = [
    'personal',
    'webapp',
    'blog',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Alternative MySQL configuration kept for reference (disabled).
'''
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'python_to_db',
        'USER': 'root',
        'PASSWORD': '',
        'HOST': 'localhost',
    }
}
'''
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
JeremyCCHsu/Python-Wrapper-for-World-Vocoder | pyworld/__init__.py | Python | mit | 176 | 0 | from __future__ | import division, print_function, absolute_import
import pkg_resources
# Resolve the package version from the installed distribution metadata.
__version__ = pkg_resources.get_distribution('pyworld').version
from .pyworld | import *
|
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/relink.py | Python | gpl-3.0 | 6,076 | 0.001317 | # Mercurial extension to provide 'hg relink' command
#
# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""recreates hardlinks between repository clones"""
from mercurial import hg, util
from mercurial.i18n import _
import os, stat
def relink(ui, repo, origin=None, **opts):
    """recreate hardlinks between two repositories
    When repositories are cloned locally, their data files will be
    hardlinked so that they only use the space of a single repository.
    Unfortunately, subsequent pulls into either repository will break
    hardlinks for any files touched by the new changesets, even if
    both repositories end up pulling the same changes.
    Similarly, passing --rev to "hg clone" will fail to use any
    hardlinks, falling back to a complete copy of the source
    repository.
    This command lets you recreate those hardlinks and reclaim that
    wasted space.
    This repository will be relinked to share space with ORIGIN, which
    must be on the same local disk. If ORIGIN is omitted, looks for
    "default-relink", then "default", in [paths].
    Do not attempt any read operations on this repository while the
    command is running. (Both repositories will be locked against
    writes.)
    """
    # Hardlink primitives are platform-dependent; bail out early where the
    # util module does not provide them.
    if (not util.safehasattr(util, 'samefile') or
            not util.safehasattr(util, 'samedevice')):
        raise util.Abort(_('hardlinks are not supported on this system'))
    # Prefer the dedicated 'default-relink' path, falling back to 'default'.
    src = hg.repository(ui, ui.expandpath(origin or 'default-relink',
                                          origin or 'default'))
    if not src.local():
        raise util.Abort(_('must specify local origin repository'))
    ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
    if repo.root == src.root:
        ui.status(_('there is nothing to relink\n'))
        return
    # Hold both repository locks for the whole collect/prune/relink pass so
    # neither store is mutated underneath us.
    locallock = repo.lock()
    try:
        remotelock = src.lock()
        try:
            candidates = sorted(collect(src, ui))
            targets = prune(candidates, src.store.path, repo.store.path, ui)
            do_relink(src.store.path, repo.store.path, targets, ui)
        finally:
            remotelock.release()
    finally:
        locallock.release()
def collect(src, ui):
    """Walk the source store and return (relative path, stat) candidates.

    Only regular revlog data files (``.d``/``.i`` suffixes) are collected;
    the computed total is just an estimate for progress reporting.
    """
    live = len(src['tip'].manifest())
    # Your average repository has some files which were deleted before
    # the tip revision. We account for that by assuming that there are
    # 3 tracked files for every 2 live files as of the tip version of
    # the repository.
    #
    # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
    total = live * 3 // 2
    storepath = src.store.path
    prefixlen = len(storepath) + len(os.path.sep)
    ui.status(_("tip has %d files, estimated total number of files: %s\n")
              % (live, total))
    candidates = []
    pos = 0
    for dirpath, dirnames, filenames in os.walk(storepath):
        # Sort in place so os.walk descends in deterministic order.
        dirnames.sort()
        relpath = dirpath[prefixlen:]
        for filename in sorted(filenames):
            if filename[-2:] not in ('.d', '.i'):
                continue
            st = os.stat(os.path.join(dirpath, filename))
            if not stat.S_ISREG(st.st_mode):
                continue
            pos += 1
            candidates.append((os.path.join(relpath, filename), st))
            ui.progress(_('collecting'), pos, filename, _('files'), total)
    ui.progress(_('collecting'), None)
    ui.status(_('collected %d candidate storage files\n') % len(candidates))
    return candidates
def prune(candidates, src, dst, ui):
    # Narrow candidates down to (filename, size) pairs that can actually be
    # relinked: the destination must exist, must not already share storage
    # with the source, must live on the same device, and match in size.
    def linkfilter(src, dst, st):
        try:
            ts = os.stat(dst)
        except OSError:
            # Destination doesn't have this file?
            return False
        if util.samefile(src, dst):
            # Already hardlinked; nothing to reclaim.
            return False
        if not util.samedevice(src, dst):
            # No point in continuing
            raise util.Abort(
                _('source and destination are on different devices'))
        if st.st_size != ts.st_size:
            return False
        return st
    targets = []
    total = len(candidates)
    pos = 0
    for fn, st in candidates:
        pos += 1
        srcpath = os.path.join(src, fn)
        tgt = os.path.join(dst, fn)
        ts = linkfilter(srcpath, tgt, st)
        if not ts:
            ui.debug('not linkable: %s\n' % fn)
            continue
        targets.append((fn, ts.st_size))
        ui.progress(_('pruning'), pos, fn, _('files'), total)
    ui.progress(_('pruning'), None)
    ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
    return targets
def do_relink(src, dst, files, ui):
    # Replace each destination file with a hardlink to its source twin, but
    # only after verifying the two are byte-for-byte identical.
    # NOTE: this function uses Python-2-only syntax (file(), 'except E, v').
    def relinkfile(src, dst):
        # Rename aside, link, and restore the backup if linking fails.
        bak = dst + '.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)
    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0
    pos = 0
    total = len(files)
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = file(source, 'rb')
        dfp = file(tgt, 'rb')
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            # Loop stopped on a mismatching chunk: contents differ.
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError, inst:
            # Report the failure but keep processing the remaining files.
            ui.warn('%s: %s\n' % (tgt, str(inst)))
    ui.progress(_('relinking'), None)
    ui.status(_('relinked %d files (%s reclaimed)\n') %
              (relinked, util.bytecount(savedbytes)))
# Mercurial command table: registers 'hg relink' with no options and an
# optional ORIGIN positional argument.
cmdtable = {
    'relink': (
        relink,
        [],
        _('[ORIGIN]')
    )
}
|
nZac/keg-elements | keg_elements/db/utils.py | Python | bsd-3-clause | 2,393 | 0.001672 | import math
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
from bla | zeutils.strings import randchars
from keg.db import db
class utcnow(expression.FunctionElement):
    """SQL function element rendering 'current UTC timestamp' per dialect."""
    type = DateTime()
@compiles(utcnow, 'postgresql')
def _pg_utcnow(element, compiler, **kw):
    # PostgreSQL: convert CURRENT_TIMESTAMP (session time zone) to UTC.
    return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utcnow, 'mssql')
def _ms_utcnow(element, compiler, **kw):
    # SQL Server has a dedicated UTC-now builtin.
    return "GETUTCDATE()"
@compiles(utcnow, 'sqlite')
def _sqlite_utcnow(element, compiler, **kw):
    # SQLite's CURRENT_TIMESTAMP is already expressed in UTC.
    return "CURRENT_TIMESTAMP"
def validate_unique_exc(exc):
    # Convenience wrapper: check the stringified exception against the
    # dialect of the currently bound engine.
    return _validate_unique_msg(db.engine.dialect.name, str(exc))
def _validate_unique_msg(dialect, msg):
"""
Does the heavy lifting for validate_unique_exception().
Broken out separately for easier unit testing. This function takes string args.
"""
if 'IntegrityError' not in msg:
raise ValueError('"IntegrityError" exception not found')
if dialect == 'postgresql':
if 'duplicate key value violates unique constraint' in msg:
return True
elif dialect == 'mssql':
if 'Cannot insert duplicate key' in msg:
return True
elif dialect == 'sqlite':
if 'UNIQUE constraint failed' in msg:
return True
else:
raise ValueError('is_unique_exc() does not yet support dialect: %s' % dialect)
return False
def randemail(length, randomizer=randchars):
    """Generate a random email address at the given length.

    :param length: must be at least 7 or the function will throw a ValueError.
    :param randomizer: a function for generating random characters with an
        interface identical to `randchars`; defaults to `randchars`.
    """
    if length < 7:
        raise ValueError('length must be at least 7')
    # Budget: one '@', one '.', and a 3-character TLD are fixed overhead;
    # the rest is split as evenly as possible between local part and domain.
    remaining = length - 2 - 3
    local_len = int(math.floor(remaining / 2.0))
    domain_len = int(math.ceil(remaining / 2.0))
    local = randomizer(local_len, 'alphanumeric')
    domain = randomizer(domain_len, 'alphanumeric')
    tld = randomizer(3, 'alpha')
    return '%s@%s.%s' % (local, domain, tld)
def session_commit():
    # Commit the current session; on any failure roll back so the session
    # stays usable, then re-raise the original exception.
    try:
        db.session.commit()
    except Exception:
        db.session.rollback()
        raise
def session_flush():
    # Flush pending changes to the database; roll back and re-raise on
    # failure, mirroring session_commit().
    try:
        db.session.flush()
    except Exception:
        db.session.rollback()
        raise
braceio/tags | tags/generator.py | Python | mit | 7,669 | 0.002738 | import os
import sys
import time
import posixpath
import threading
# Python 2/3 compatibility: pull url parsing and the simple HTTP server
# classes from their version-specific locations.
if sys.version > '3':
    import urllib.parse
    from http.server import HTTPServer
    from http.server import SimpleHTTPRequestHandler
else:
    import urllib
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler
from . import tags
from . import utils
from . import templatelang
def build_file(filename, outfilename, root='.', create_dir=True):
    """Render one template file and write the result to *outfilename*.

    Reads ``filename`` relative to ``root``, decodes it as UTF-8, runs it
    through the tag renderer and writes the rendered output.  On a template
    parse error the error is printed and nothing is written.
    """
    py3 = sys.version > '3'
    source_path = os.path.join(root, filename)
    with utils.open_file(source_path) as source:
        raw = source.read()
        try:
            # `unicode` only exists on Python 2; the py3 branch uses str().
            text = str(raw, 'utf-8') if py3 else unicode(raw, 'utf-8')
            rendered = tags.render(text, filename=filename, rootdir=root)
        except templatelang.ParseBaseException as err:
            utils.print_parse_exception(err, filename)
            return
    with utils.open_file(outfilename, "w", create_dir=create_dir) as target:
        target.write(rendered if py3 else rendered.encode('utf-8'))
def build_files(root='.', dest='_site', pattern='**/*.html',
                exclude='_*/**', watch=False, force=False):
    """Build all matching templates under *root* into *dest*.

    Files matching *pattern* (and not *exclude*) are rendered with
    build_file(); all other non-excluded files are copied verbatim.
    With ``watch=True`` the function blocks and rebuilds on changes
    until interrupted.  Exits the process if *root* has no index.html
    and *force* is not set.
    """
    # Sanity check: refuse to build a folder that does not look like a
    # site root, unless the caller forces it.
    try:
        os.stat(os.path.join(root, 'index.html'))
    except OSError:
        if not force:
            msg = "Oops, we can't find an index.html in the source folder.\n"+\
                  "If you want to build this folder anyway, use the --force\n"+\
                  "option."
            print(msg)
            sys.exit(1)
    print("Building site from '{0}' into '{1}'".format(root, dest))
    # Default exclude: never re-process the destination folder itself.
    exclude = exclude or os.path.join(dest, '**')
    for filename in utils.walk_folder(root or '.'):
        included = utils.matches_pattern(pattern, filename)
        excluded = utils.matches_pattern(exclude, filename)
        destfile = os.path.join(dest, filename)
        if included and not excluded:
            build_file(filename, destfile, root=root)
        elif not excluded:
            # Not a template: copy through unchanged (assets, images, ...).
            filepath = os.path.join(root, filename)
            destpath = os.path.join(dest, filename)
            utils.copy_file(filepath, destpath)
    if watch:
        observer = _watch(root=root,
                          dest=dest,
                          pattern=pattern,
                          exclude=exclude)
        if not observer:
            # watchdog not installed; _watch already printed the reason.
            return
        try:
            # Block until Ctrl-C; the observer rebuilds in the background.
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
def _watch(root='.', dest='_site', pattern='**/*.html', exclude='_*/**'):
    """Start a watchdog observer that rebuilds the site on any FS event.

    Returns the started Observer, or None if watchdog is not installed.
    The import is done lazily so watchdog stays an optional dependency.
    """
    try:
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
    except ImportError:
        msg = "The build --watch feature requires watchdog. \n"\
            + "Please install it with 'easy_install watchdog'."
        print(msg)
        return None
    class handler(FileSystemEventHandler):
        # Rebuild everything on any event outside the excluded paths
        # (coarse but simple; avoids tracking individual dependencies).
        def on_any_event(self, event):
            exclude_path = os.path.join(os.getcwd(), exclude)
            if not utils.matches_pattern(exclude_path, event.src_path):
                build_files(root=root,
                            dest=dest,
                            pattern=pattern,
                            exclude=exclude)
    observer = Observer()
    observer.schedule(handler(), root, recursive=True)
    observer.start()
    print("Watching '{0}' ...".format(root))
    return observer
def serve_files(root='.', dest='_site', pattern='**/*.html',
                exclude='_*/**', watch=False, port=8000, force=False):
    """Build the site and serve *dest* over HTTP on *port*.

    Runs a background HTTP server thread, performs an initial build, and
    (optionally) watches *root* for changes.  Blocks until Ctrl-C.
    """
    # setup server
    class RequestHandler(SimpleHTTPRequestHandler):
        def translate_path(self, path):
            # Serve from the build output folder rather than the CWD.
            root = os.path.join(os.getcwd(), dest)
            # normalize path and prepend root directory
            path = path.split('?',1)[0]
            path = path.split('#',1)[0]
            if sys.version > '3':
                path = posixpath.normpath(urllib.parse.unquote(path))
            else:
                path = posixpath.normpath(urllib.unquote(path))
            words = path.split('/')
            words = [_f for _f in words if _f]
            path = root
            for word in words:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                # Skip '.' and '..' components so requests cannot escape
                # the served directory.
                if word in (os.curdir, os.pardir):
                    continue
                path = os.path.join(path, word)
            return path
    class StoppableHTTPServer(HTTPServer):
        def serve_until_shutdown(self):
            self._stopped = False
            while not self._stopped:
                try:
                    httpd.handle_request()
                except:
                    # NOTE(review): bare except deliberately swallows any
                    # error (including during shutdown) to end the loop.
                    self._stopped=True
                    self.server_close()
        def shutdown(self):
            self._stopped = True
            self.server_close()
    server_address = ('', port)
    httpd = StoppableHTTPServer(server_address, RequestHandler)
    # Daemon thread: the process can exit even if the server loop is blocked.
    server_thread = threading.Thread(
        target=httpd.serve_until_shutdown)
    server_thread.daemon = True
    server_thread.start()
    print("HTTP server started on port {0}".format(server_address[1]))
    # build files
    build_files(root=root,
                dest=dest,
                pattern=pattern,
                exclude=exclude,
                force=force)
    # watch files while server running
    if watch:
        observer = _watch(root=root,
                          dest=dest,
                          pattern=pattern,
                          exclude=exclude)
        if not observer:
            return
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
            httpd.shutdown()
            observer.join()
    else:
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            httpd.shutdown()
NEW_INDEX_STR = """<!DOCTYPE html>
<html>
{% include _partials/header.html %}
<body>
{% include _partials/nav.html %}
<h1>Welcome!</h1>
</body>
</html>"""
NEW_ABOUT_STR = """<!DOCTYPE html>
<html>
{% include _partials/header.html %}
<body>
{% include _partials/nav.html %}
<h1>About!</h1>
</body>
</html>"""
NEW_HEADER_STR = """
<head>
<title>My new site</title>
<link rel="stylesheet" href="/css/style.css" />
</head>"""
NEW_NAV_STR = """
<ul>
<li>
<a href="/"{% is index.html %} class="active"{% endis %}>
home
</a>
</li>
| <li>
<a href="/about.html"{% is about.html %} class="active"{% endis %}>
about
</a>
</li>
</ul>"""
NEW_STYLE_STR = """.active {font-weight:bold;}"""
NEW_SITE = {
'index.html': NEW_INDEX_STR,
'about.html': NEW_ABOUT_STR,
'_partials/header.html': NEW_HEADER_STR,
'_partials/nav.html': NEW_NAV_STR,
'css/style.css': NEW_STYLE_STR
}
def new_site(root='.', force=False):
try:
os.stat(os.path.join(root, 'index.html'))
if not force:
| msg = "Oops, there's already an index.html file in the source \n"+\
"folder. If you want to overwrite this folder with a new \n"+\
"site, use the --force option."
print(msg)
sys.exit(1)
except OSError:
pass
print("Creating new site in '{0}'.".format(root))
for fname, text in list(NEW_SITE.items()):
fpath = os.path.join(root, fname)
with utils.open_file(fpath, "w", create_dir=True) as afile:
afile.write(text)
|
openstack/cinder | cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py | Python | apache-2.0 | 235,073 | 0.000009 | # Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
from copy import deepcopy
import time
from unittest import mock
import six
from cinder import exception
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_data as tpd)
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_fake_objects as tpfo)
from cinder.volume.drivers.dell_emc.powermax import common
from cinder.volume.drivers.dell_emc.powermax import fc
from cinder.volume.drivers.dell_emc.powermax import masking
from cinder.volume.drivers.dell_emc.powermax import metadata
from cinder.volume.drivers.dell_emc.powermax import provision
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
from cinder.volume import volume_utils
class PowerMaxCommonTest(test.TestCase):
    def setUp(self):
        """Wire up a PowerMax FC driver against fully faked REST plumbing.

        Builds a FakeConfiguration with sync replication enabled, stubs the
        REST session with FakeRequestsSession, and exposes the driver's
        sub-components (masking/provision/rest/utils) as test attributes.
        """
        self.data = tpd.PowerMaxData()
        super(PowerMaxCommonTest, self).setUp()
        self.mock_object(volume_utils, 'get_max_over_subscription_ratio',
                         return_value=1.0)
        replication_device = self.data.sync_rep_device
        configuration = tpfo.FakeConfiguration(
            emc_file=None, volume_backend_name='CommonTests', interval=1,
            retries=1, san_ip='1.1.1.1', san_login='smc',
            powermax_array=self.data.array, powermax_srp='SRP_1',
            san_password='smc', san_api_port=8443,
            powermax_port_groups=[self.data.port_group_name_f],
            powermax_port_group_name_template='portGroupName',
            replication_device=replication_device)
        # Replace the real REST session factory so no HTTP calls are made.
        rest.PowerMaxRest._establish_rest_session = mock.Mock(
            return_value=tpfo.FakeRequestsSession())
        driver = fc.PowerMaxFCDriver(configuration=configuration)
        self.driver = driver
        self.common = self.driver.common
        self.masking = self.common.masking
        self.provision = self.common.provision
        self.rest = self.common.rest
        self.utils = self.common.utils
        self.utils.get_volumetype_extra_specs = (
            mock.Mock(return_value=self.data.vol_type_extra_specs))
        self.rest.is_snap_id = True
    @mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version',
                       return_value=tpd.PowerMaxData.next_gen_ucode)
    @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
                       return_value=('PowerMax 2000', True))
    @mock.patch.object(rest.PowerMaxRest, 'set_rest_credentials')
    @mock.patch.object(common.PowerMaxCommon, '_get_slo_workload_combinations',
                       return_value=[])
    @mock.patch.object(common.PowerMaxCommon,
                       'get_attributes_from_cinder_config',
                       side_effect=[[], tpd.PowerMaxData.array_info_wl])
    def test_gather_info_tests(self, mck_parse, mck_combo, mck_rest,
                               mck_nextgen, mck_ucode):
        """_gather_info: no-option startup, then next-gen ucode detection."""
        # Use-Case 1: Gather info no-opts
        configuration = tpfo.FakeConfiguration(
            None, 'config_group', None, None)
        fc.PowerMaxFCDriver(configuration=configuration)
        # Use-Case 2: Gather info next-gen with ucode/version
        self.common._gather_info()
        self.assertTrue(self.common.next_gen)
        self.assertEqual(self.common.ucode_level, self.data.next_gen_ucode)
    @mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version',
                       return_value=tpd.PowerMaxData.next_gen_ucode)
    @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info',
                       return_value=('PowerMax 2000', True))
    @mock.patch.object(rest.PowerMaxRest, 'set_rest_credentials')
    @mock.patch.object(
        common.PowerMaxCommon, 'get_attributes_from_cinder_config',
        return_value={'SerialNumber': tpd.PowerMaxData.array})
    @mock.patch.object(
        common.PowerMaxCommon, '_get_attributes_from_config')
    def test_gather_info_rep_enabled_duplicate_serial_numbers(
            self, mck_get_cnf, mck_get_c_cnf, mck_set, mck_model, mck_ucode):
        """_gather_info must reject a replication target equal to the source."""
        # Save shared driver state so other tests see the original values.
        is_enabled = self.common.replication_enabled
        targets = self.common.replication_targets
        self.common.replication_enabled = True
        self.common.replication_targets = [self.data.array]
        self.assertRaises(
            exception.InvalidConfigurationValue, self.common._gather_info)
        # Restore the saved state.
        self.common.replication_enabled = is_enabled
        self.common.replication_targets = targets
    @mock.patch.object(common.PowerMaxCommon,
                       '_gather_info')
    def test_get_attributes_from_config_short_host_template(
            self, mock_gather):
        """A configured short-host template is picked up from the config."""
        configuration = tpfo.FakeConfiguration(
            emc_file=None, volume_backend_name='config_group', interval='10',
            retries='10', replication_device=None,
            powermax_short_host_name_template='shortHostName')
        driver = fc.PowerMaxFCDriver(configuration=configuration)
        driver.common._get_attributes_from_config()
        self.assertEqual(
            'shortHostName', driver.common.powermax_short_host_name_template)
    @mock.patch.object(common.PowerMaxCommon,
                       '_gather_info')
    def test_get_attributes_from_config_no_short_host_template(
            self, mock_gather):
        """Without a template option, the short-host template stays None."""
        configuration = tpfo.FakeConfiguration(
            emc_file=None, volume_backend_name='config_group', interval='10',
            retries='10', replication_device=None)
        driver = fc.PowerMaxFCDriver(configuration=configuration)
        driver.common._get_attributes_from_config()
        self.assertIsNone(driver.common.powermax_short_host_name_template)
@mock.patch.object(common.PowerMaxCommon,
'_gather_info')
def test_get_attributes_from_config_port_group_template(
self, mock_gather):
configuration = tpfo.FakeConfiguration(
emc_file=None, volume_backend_name='co | nfig_group', interval='10',
retries='10', replication_device=None,
powermax_port_group_name_template='portGroupName | ')
driver = fc.PowerMaxFCDriver(configuration=configuration)
driver.common._get_attributes_from_config()
self.assertEqual(
'portGroupName', driver.common.powermax_port_group_name_template)
    @mock.patch.object(common.PowerMaxCommon,
                       '_gather_info')
    def test_get_attributes_from_config_no_port_group_template(
            self, mock_gather):
        """Without a template option, the port-group template stays None."""
        configuration = tpfo.FakeConfiguration(
            emc_file=None, volume_backend_name='config_group', interval='10',
            retries='10', replication_device=None)
        driver = fc.PowerMaxFCDriver(configuration=configuration)
        driver.common._get_attributes_from_config()
        self.assertIsNone(driver.common.powermax_port_group_name_template)
def test_get_slo_workload_combinations_powermax(self):
self.common.next_gen = True
self.common.array_model = 'PowerMax_2000'
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 24)
def test_get_slo_workload_combinations_afa_powermax(self):
self.common.next_gen = True
self.common.array_model = 'VMAX250F'
array_info = {}
pools = self.common._get_slo_workload_combination |
lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/plugins/supervisor/__init__.py | Python | apache-2.0 | 287 | 0 | from ajenti.api import *
from ajenti.plugins import *
info = PluginInfo(
title='Supervisor',
icon='play',
dependencies=[
PluginDe | pendency('main'),
PluginDependency('services'),
BinaryDependency('supervisord'),
],
)
def init():
import | main
|
rdeheele/odoo | addons/website/models/ir_ui_view.py | Python | agpl-3.0 | 9,428 | 0.003076 | # -*- coding: utf-8 -*-
import copy
from lxml import etree, html
from openerp import SUPERUSER_ID, tools
from openerp.addons.website.models import website
from openerp.http import request
from openerp.osv import osv, fields
class view(osv.osv):
_inherit = "ir.ui.view"
_columns = {
'page': fields.boolean("Whether this view is a web page template (complete)"),
'website_meta_title': fields.char("Website meta title", size=70, translate=True),
'website_meta_description': fields.text("Website meta description", size=160, translate=True),
'website_meta_keywords': fields.char("Website meta keywords", translate=True),
'customize_show': fields.boolean("Show As Optional Inherit"),
'website_id': fields.many2one('website',ondelete='cascade', string="Website"),
}
_sql_constraints = [
('key_website_id_uniq', 'unique(key, website_id)',
'Key must be unique per website.'),
]
_defaults = {
'page': False,
'customize_show': False,
}
def _view_obj(self, cr, uid, view_id, context=None):
if isinstance(view_id, basestring):
return self.pool['ir.model.data'].xmlid_to_object(
cr, uid, view_id, raise_if_not_found=True, context=context
)
elif isinstance(view_id, (int, long)):
return self.browse(cr, uid, view_id, context=context)
# assume it's already a view object (WTF?)
return view_id
# Returns all views (called and inherited) related to a view
# Used by translation mechanism, SEO and optional templates
def _views_get(self, cr, uid, view_id, options=True, bundles=False, context=None, root=True):
""" For a given view ``view_id``, should return:
* the view itself
* all views inheriting from it, enabled or not
- but not the optional children of a non-enabled child
* all views called from it (via t-call)
"""
try:
view = self._view_obj(cr, uid, view_id, context=context)
except ValueError:
# Shall we log that ?
return []
while root and view.inherit_id:
view = view.inherit_id
result = [view]
node = etree.fromstring(view.arch)
xpath = "//t[@t-call]"
if bundles:
xpath += "| //t[@t-call-assets]"
for child in node.xpath(xpath):
try:
called_view = s | elf._view_obj(cr, uid, child.get('t-call', child.get('t-call-assets')), context=context)
except ValueError:
continue
if called_view not in result:
result += self._views_ge | t(cr, uid, called_view, options=options, bundles=bundles, context=context)
extensions = view.inherit_children_ids
if not options:
# only active children
extensions = (v for v in view.inherit_children_ids if v.active)
# Keep options in a deterministic order regardless of their applicability
for extension in sorted(extensions, key=lambda v: v.id):
for r in self._views_get(
cr, uid, extension,
# only return optional grandchildren if this child is enabled
options=extension.active,
context=context, root=False):
if r not in result:
result.append(r)
return result
    def extract_embedded_fields(self, cr, uid, arch, context=None):
        """Return all nodes in *arch* that embed a field of another model
        (any element whose data-oe-model is not ir.ui.view)."""
        return arch.xpath('//*[@data-oe-model != "ir.ui.view"]')
    def save_embedded_field(self, cr, uid, el, context=None):
        """Persist the value edited inline in element *el* back to its record.

        The target model/field/record id come from the data-oe-* attributes;
        the HTML content is converted back to a field value by the qweb
        converter matching the field's widget type.
        """
        Model = self.pool[el.get('data-oe-model')]
        field = el.get('data-oe-field')
        column = Model._all_columns[field].column
        converter = self.pool['website.qweb'].get_converter_for(
            el.get('data-oe-type'))
        value = converter.from_html(cr, uid, Model, column, el)
        # None means the converter could not extract a value; skip the write.
        if value is not None:
            # TODO: batch writes?
            Model.write(cr, uid, [int(el.get('data-oe-id'))], {
                field: value
            }, context=context)
def to_field_ref(self, cr, uid, el, context=None):
# filter out meta-information inserted in the document
attributes = dict((k, v) for k, v in el.items()
if not k.startswith('data-oe-'))
attributes['t-field'] = el.get('data-oe-expression')
out = html.html_parser.makeelement(el.tag, attrib=attributes)
out.tail = el.tail
return out
    def replace_arch_section(self, cr, uid, view_id, section_xpath, replacement, context=None):
        """Replace the content of one section of a view's arch.

        Only the children/text of the matched node are replaced -- the node
        itself is kept because it is not editable, only its content is.
        Returns the modified arch as an lxml tree (not saved).
        """
        # the root of the arch section shouldn't actually be replaced as it's
        # not really editable itself, only the content truly is editable.
        [view] = self.browse(cr, uid, [view_id], context=context)
        arch = etree.fromstring(view.arch.encode('utf-8'))
        # => get the replacement root
        if not section_xpath:
            root = arch
        else:
            # ensure there's only one match
            [root] = arch.xpath(section_xpath)
        root.text = replacement.text
        root.tail = replacement.tail
        # replace all children
        del root[:]
        for child in replacement:
            root.append(copy.deepcopy(child))
        return arch
    @tools.ormcache_context(accepted_keys=('website_id',))
    def get_view_id(self, cr, uid, xml_id, context=None):
        """Resolve *xml_id* to a view id, preferring the website-specific
        view when a website_id is in the context.

        Cached per website_id; ordering by website_id makes the
        website-specific record win over the generic (website_id=False) one.
        NOTE(review): relies on search() ordering 'website_id' so that the
        non-null value sorts first -- confirm against the ORM's NULL ordering.
        """
        if context and 'website_id' in context and not isinstance(xml_id, (int, long)):
            domain = [('key', '=', xml_id), '|', ('website_id', '=', context['website_id']), ('website_id', '=', False)]
            [xml_id] = self.search(cr, uid, domain, order='website_id', limit=1, context=context)
        else:
            xml_id = super(view, self).get_view_id(cr, uid, xml_id, context=context)
        return xml_id
    def render(self, cr, uid, id_or_xml_id, values=None, engine='ir.qweb', context=None):
        """Render the view, switching to the website qweb engine and
        injecting the website rendering context when a website request is
        being served.
        """
        if request and getattr(request, 'website_enabled', False):
            engine='website.qweb'
            if isinstance(id_or_xml_id, list):
                id_or_xml_id = id_or_xml_id[0]
            if not context:
                context = {}
            # Company is read as superuser: public visitors may not have
            # read access to res.company.
            company = self.pool['res.company'].browse(cr, SUPERUSER_ID, request.website.company_id.id, context=context)
            qcontext = dict(
                context.copy(),
                website=request.website,
                url_for=website.url_for,
                slug=website.slug,
                res_company=company,
                user_id=self.pool.get("res.users").browse(cr, uid, uid),
                translatable=context.get('lang') != request.website.default_lang_code,
                editable=request.website.is_publisher(),
                menu_data=self.pool['ir.ui.menu'].load_menus_root(cr, uid, context=context) if request.website.is_user() else None,
            )
            # add some values
            if values:
                qcontext.update(values)
            # in edit mode ir.ui.view will tag nodes
            context = dict(context, inherit_branding=qcontext.get('editable', False))
            view_obj = request.website.get_template(id_or_xml_id)
            if 'main_object' not in qcontext:
                qcontext['main_object'] = view_obj
            values = qcontext
        return super(view, self).render(cr, uid, id_or_xml_id, values=values, engine=engine, context=context)
    def _pretty_arch(self, arch):
        """Return *arch* (an lxml tree) serialized as pretty-printed unicode.

        :param arch: lxml element tree of a view arch
        :returns: unicode string with normalized indentation
        """
        # remove_blank_string does not seem to work on HTMLParser, and
        # pretty-printing with lxml more or less requires stripping
        # whitespace: http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
        # so serialize to XML, parse as XML (remove whitespace) then serialize
        # as XML (pretty print)
        arch_no_whitespace = etree.fromstring(
            etree.tostring(arch, encoding='utf-8'),
            parser=etree.XMLParser(encoding='utf-8', remove_blank_text=True))
        return etree.tostring(
            arch_no_whitespace, encoding='unicode', pretty_print=True)
def save(self, |
a-sk/alot | alot/db/utils.py | Python | gpl-3.0 | 15,380 | 0.001235 | # Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import os
import email
import tempfile
import re
from email.header import Header
import email.charset as charset
charset.add_charset('utf-8', charset.QP, charset.QP, 'utf-8')
from email.iterators import typed_subpart_iterator
import logging
import mailcap
from cStringIO import StringIO
import alot.crypto as crypto
import alot.helper as helper
from alot.errors import GPGProblem
from alot.settings import settings
from alot.helper import string_sanitize
from alot.helper import string_decode
from alot.helper import parse_mailcap_nametemplate
from alot.helper import split_commandstring
X_SIGNATURE_VALID_HEADER = 'X-Alot-OpenPGP-Signature-Valid'
X_SIGNATURE_MESSAGE_HEADER = 'X-Alot-OpenPGP-Signature-Message'
def add_signature_headers(mail, sigs, error_msg):
    '''Add pseudo headers to the mail indicating whether the signature
    verification was successful.

    :param mail: :class:`email.message.Message` the message to entitle
    :param sigs: list of :class:`gpgme.Signature`
    :param error_msg: `str` containing an error message, the empty
                      string indicating no error
    '''
    sig_from = ''

    if len(sigs) == 0:
        error_msg = error_msg or 'no signature found'
    else:
        try:
            sig_from = crypto.get_key(sigs[0].fpr).uids[0].uid
        except Exception:
            # Key lookup may fail for unknown keys; fall back to the raw
            # fingerprint.  (Was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.)
            sig_from = sigs[0].fpr

    mail.add_header(
        X_SIGNATURE_VALID_HEADER,
        'False' if error_msg else 'True',
    )
    mail.add_header(
        X_SIGNATURE_MESSAGE_HEADER,
        u'Invalid: {0}'.format(error_msg)
        if error_msg else
        u'Valid: {0}'.format(sig_from),
    )
def get_params(mail, failobj=(), header='content-type', unquote=True):
    '''Get Content-Type parameters as dict.

    RFC 2045 specifies that parameter names are case-insensitive, so
    we normalize them here.

    :param mail: :class:`email.message.Message`
    :param failobj: iterable of (key, value) pairs used when the header is
        missing; the previous mutable ``list()`` default argument is
        replaced with an immutable empty tuple (behaviour is unchanged --
        the value is only iterated, never mutated or returned as-is)
    :param header: the header to search for parameters, default
    :param unquote: unquote the values
    :returns: a `dict` containing the parameters
    '''
    return {k.lower(): v for k, v in mail.get_params(failobj, header, unquote)}
def message_from_file(handle):
'''Reads a mail from the given file-like object and returns an email
object, very much like email.message_from_file. In addition to
that OpenPGP encrypted data is detected and decrypted. If this
succeeds, any mime messages found in the recovered plaintext
message are added to the returned message object.
:param handle: a file-like object
:returns: :class:`email.message.Message` possibly augmented with
decrypted data
'''
m = email.message_from_file(handle)
# make sure noone smuggles a token in (data from m is untrusted)
del m[X_SIGNATURE_VALID_HEADER]
del m[X_SIGNATURE_MESSAGE_HEADER]
p = get_params(m)
app_pgp_sig = 'application/pgp-signature'
app_pgp_enc = 'application/pgp-encrypted'
# handle OpenPGP signed data
if (m.is_multipart() and
m.get_content_subtype() == 'signed' and
p.get('protocol', None) == app_pgp_sig):
# RFC 3156 is quite strict:
# * exactly two messages
# * the second is of type 'application/pgp-signature'
# * the second contains the detached signature
malformed = False
if len(m.get_payload()) != 2:
malformed = u'expected exactly two messages, got {0}'.format(
len(m.get_payload()))
ct = m.get_payload(1).get_content_type()
if ct != app_pgp_sig:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
app_pgp_sig, ct)
# TODO: RFC 3156 says the alg has to be lower case, but I've
# seen a message with 'PGP-'. maybe we should be more
# permissive here, or maybe not, this is crypto stuff...
if not p.get('micalg', 'nothing').startswith('pgp-'):
malformed = u'expected micalg=pgp-..., got: {0}'.format(
p.get('micalg', 'nothing'))
sigs = []
if not malformed:
try:
sigs = crypto.verify_detached(m.get_payload(0).as_string(),
m.get_payload(1).get_payload())
except GPGProblem as e:
malformed = unicode(e)
add_signature_headers(m, sigs, malformed)
# handle OpenPGP encrypted data
elif (m.is_multipart() and
m.get_content_subtype() == 'encrypted' and
p.get('protocol', None) == app_pgp_enc and
'Version: 1' in m.get_payload(0).get_payload()):
# RFC 3156 is quite strict:
# * exactly two messages
# * the first is of type 'application/pgp-encrypted'
# * the first contains 'Version: 1'
# * the second is of type 'application/octet-stream'
# * the second contains the encrypted and possibly signed data
malformed = False
ct = m.get_payload(0).get_content_type()
if ct != app_pgp_enc:
malformed = u'expected Content-Type: {0}, got: {1}'.format(
app_pgp_enc, ct)
want = 'application/octet-stream'
ct = m.get_payload(1).get_content_type()
if ct != want:
malformed = u'expected Content-Type: {0}, got: {1}'.format(want,
ct)
if not malformed:
try:
sigs, d = crypto.decrypt_verify(m.get_payload(1).get_payload())
except GPGProblem as e:
# signature verification failures end up here too if
# the combined method is used, currently this prevents
# the interpretation of the recovered plain text
# mail. maybe that's a feature.
malformed = unicode(e)
else:
# parse decrypted message
n = message_from_string(d)
# add the decrypted message to m. note that n contains
# all the attachments, no need to walk over n here.
m.attach(n)
# add any defects found
m.defects.extend(n.defects)
# there are two methods for both signed and encrypted
# data, one is called 'RFC 1847 Encapsulation' by
# RFC 3156, and one is the 'Combined method'.
if len(sigs) == 0:
# 'RFC 1847 Encapsulation', the signature is a
# detached signature found in the recovered mime
# message of type multipart/signed.
if X_SIGNATURE_VALID_HEADER in n:
for k in (X_SIGNATURE_VALID_HEADER,
X_SIGNA | TURE_MESSAGE_HEADER):
m[k] = n[k]
e | lse:
# an encrypted message without signatures
# should arouse some suspicion, better warn
# the user
add_signature_headers(m, [], 'no signature found')
else:
# 'Combined method', the signatures are returned
# by the decrypt_verify function.
# note that if we reached this point, we know the
# signatures are valid. if they were not valid,
# the else block of the current try would not have
# been executed
add_signature_headers(m, sigs, '')
if malformed:
msg = u'Malformed OpenPGP message: {0}'.format(malformed)
content = email.message_from_string(msg.encode('utf-8'))
content.set_charset('utf-8')
m.attach(content)
return m
def message_from_string(s):
'''Reads a mail from the given string. This is the equivalent of
:func:`email.message_from_string` wh |
Qwaz/solved-hacking-problem | SSCTF/2016/Crypto&Exploit/HeHeDa/Algorithm1-577265e1.py | Python | gpl-2.0 | 3,529 | 0.001133 | def LShift(t, k):
k %= 8
return ((t << k) | (t >> (8 - k))) & 0xff
def encode(p):
    """Render the low 8 bits of *p* MSB-first: '|' for 1, 'O' for 0."""
    return ''.join('|' if (p >> i) & 1 else 'O' for i in range(7, -1, -1))
A = [85, 128, 177, 163, 7, 242, 231, 69, 185, 1, 91, 89, 80, 156, 81, 9, 102, 221, 195, 33, 31, 131, 179, 246, 15, 139, 205, 49, 107, 193, 5, 63, 117, 74, 140, 29, 135, 43, 197, 212, 0, 189, 218, 190, 112, 83, 238, 47, 194, 68, 233, 67, 122, 138, 53, 14, 35, 76, 79, 162, 145, 51, 90, 234, 50, 6, 225, 250, 215, 133, 180, 97, 141, 96, 20, 226, 3, 191, 187, 57, 168, 171, 105, 113, 196, 71, 239, 200, 254, 175, 164, 203, 61, 16, 241, 40, 176, 59, 70, 169, 146, 247, 232, 152, 165, 62, 253, 166, 167, 182, 160, 125, 78, 28, 130, 159, 255, 124, 153, 56, 58, 143, 150, 111, 207, 206, 32, 144,
75, 39, 10, 201, 204, 77, 104, 65, 219, 98, 210, 173, 249, 13, 12, 103, 101, 21, 115, 48, 157, 147, 11, 99, 227, 45, 202, 158, 213, 100, 244, 54, 17, 161, 123, 92, 181, 243, 184, 188, 84, 95, 27, 72, 106, 192, 52, 44, 55, 129, 208, 109, 26, 24, 223, 64, 114, 19, 198, 23, 82, 120, 142, 178, 214, 186, 116, 94, 222, 86, 251, 36, 4, 248, 132, 25, 211, 199, 30, 87, 60, 127, 155, 41, 224, 151, 237, 136, 245, 37, 170, 252, 8, 42, 209, 46, 108, 88, 183, 149, 110, 66, 235, 229, 134, 73, 38, 118, 236, 119, 154, 216, 217, 240, 22, 121, 174, 93, 126, 230, 228, 18, 148, 220, 172, 2, 137, 34]
B = [0, 2, 3, 7, 1, 5, 6, 4]
C = | [179, 132, 74, 60, 94, 252, 166, 242, 208, 217, 117, 255, 20, 99, 225, 58, 54, 184, 243, 37, 96, 106, 64, 151, 148, 248, 44, 175, 152, 40, 171, 251, 210, 118, 56, 6, 138, 77, 45, 169, 209, 232, 68, 182, 91, 203, 9, 16, 172, 95, 154, 90, 164, 161, 231, 11, 21, 3, 97, 70, 34, 86, 124, 114, 119, 223, 123, 167, 47, 219, 197, 221, 193, 192, 126, 78, 39, 233, 4, 120, 33, 131, 145, 183, 143, 31 | , 76, 121, 92, 153, 85, 100, 52, 109, 159, 112, 71, 62, 8, 244, 116, 245, 240, 215, 111, 134, 199, 214, 196, 213, 180, 189, 224, 101, 202, 201, 168, 32, 250, 59, 43, 27, 198, 239, 137, 238, 50,
149, 107, 247, 7, 220, 246, 204, 127, 83, 146, 147, 48, 17, 67, 23, 93, 115, 41, 191, 2, 227, 87, 173, 108, 82, 205, 49, 1, 66, 105, 176, 22, 236, 29, 170, 110, 18, 28, 185, 235, 61, 88, 13, 165, 188, 177, 230, 130, 253, 150, 211, 42, 129, 125, 141, 19, 190, 133, 53, 84, 140, 135, 10, 241, 222, 73, 12, 155, 57, 237, 181, 36, 72, 174, 207, 98, 5, 229, 254, 156, 178, 128, 55, 14, 69, 30, 194, 122, 46, 136, 160, 206, 26, 102, 218, 103, 139, 195, 0, 144, 186, 249, 79, 81, 75, 212, 234, 158, 163, 80, 226, 65, 200, 38, 187, 113, 63, 24, 25, 142, 51, 228, 35, 157, 216, 104, 162, 15, 89]
D = [2, 4, 0, 5, 6, 7, 1, 3]
plain = bytearray("asdfghjk123456")
key = bytearray(/*Missed*/)
assert len(key) == 8
t1 = bytearray()
for i in plain:
t1.append(A[i])
t2 = bytearray()
for i in range(len(t1)):
t2.append(LShift(t1[i], B[i % 8]))
for times in range(16):
for i in range(len(t2)):
t2[i] = C[t2[i]]
for i in range(len(t2)):
t2[i] = LShift(t2[i], i ^ D[i % 8])
for i in range(len(t2)):
t2[i] ^= key[i % 8]
out = ""
for i in t2:
out += encode(i)
print out
# out>>
# OO|OO||OO|||||OO|OO||O||O|O||O|||O|OOOOOOO|O|O|O|||||OO|||O|||OO||O|OOOOOO|O|OO|OO||||OO|||OOOO|||||O||||O|OO|O|O|O||OO|O||O|OO|O||O|||O||O|OO|OOOOOO||OOO|O|O|O|||O|OO|O|O||O||O||OOOOO|||OO|O|
# flag >>
# OO||O||O|O|||OOOO||||||O|O|||OOO||O|OOOO||O|O|OO|||||OOOO||||O||OO|OO||O||O|O|O|||||OOOOOO|O|O||OOOOOOO||O|||OOOO||OO|OO|||O|OO|O|||O|O|OO|OOOO|OOO|OOO|OOOO||O|OO||||OO||||OOO|O|O||OO||||O||OOO|||O|OO|OO||OO||OOOO|O|
|
sabirmostofa/app-engine-facebook | lib/twitter.py | Python | lgpl-3.0 | 4,934 | 0.003445 | from lib.oauth2 import Consumer as OAuthConsumer, Token, Request as OAuthRequest, \
SignatureMethod_HMAC_SHA1
from urllib2 import Request, urlopen
from lib import simplejson
import config
# Twitter configuration
TWITTER_SERVER = 'api.twitter.com'
TWITTER_REQUEST_TOKEN_URL = 'https://%s/oauth/request_token' % TWITTER_SERVER
TWITTER_ACCESS_TOKEN_URL = 'https://%s/oauth/access_token' % TWITTER_SERVER
# Note: oauth/authorize forces the user to authorize every time.
# oauth/authenticate uses their previous selection, barring revocation.
TWITTER_AUTHORIZATION_URL = 'http://%s/oauth/authenticate' % TWITTER_SERVER
TWITTER_CHECK_AUTH = 'https://twitter.com/account/verify_credentials.json'
class TwitterAuth(object):
    """Twitter OAuth (1.0a) authentication mechanism.

    Implements the three-legged flow: request token -> user authorization
    -> access token, plus a credentials check that fetches the user data.
    """
    AUTHORIZATION_URL = TWITTER_AUTHORIZATION_URL
    REQUEST_TOKEN_URL = TWITTER_REQUEST_TOKEN_URL
    ACCESS_TOKEN_URL = TWITTER_ACCESS_TOKEN_URL
    SERVER_URL = TWITTER_SERVER
    AUTH_BACKEND_NAME = 'twitter'
    SETTINGS_KEY_NAME = 'TWITTER_CONSUMER_KEY'
    SETTINGS_SECRET_NAME = 'TWITTER_CONSUMER_SECRET'

    def __init__(self, request, redirect_uri=None):
        """Keep the HTTP request and the optional OAuth callback URI."""
        self.request = request
        self.redirect_uri = redirect_uri

    def auth_url(self):
        """Return the URL the user must visit to authorize this app."""
        request_token = self.unauthorized_token()
        session_key = self.AUTH_BACKEND_NAME + 'unauthorized_token_name'
        # Remember the request token so auth_complete() can validate the
        # token Twitter sends back to our callback.
        self.request.session[session_key] = request_token.to_string()
        authorize = self.oauth_request(request_token, self.AUTHORIZATION_URL)
        return str(authorize.to_url())

    def auth_complete(self, oauth_token, oauth_verifier):
        """Finish the flow and return the authenticated user's data."""
        session_key = self.AUTH_BACKEND_NAME + 'unauthorized_token_name'
        stored_token = self.request.session[session_key]
        del self.request.session[session_key]
        if not stored_token:
            raise ValueError('Missing unauthorized token')
        token = Token.from_string(stored_token)
        if token.key != oauth_token:
            raise ValueError('Incorrect tokens')
        access_token = self.access_token(token, oauth_verifier)
        return self.user_data(access_token)

    def save_association_data(self, user_data):
        """Stash the user data in the session as a JSON string."""
        session_key = self.AUTH_BACKEND_NAME + 'association_data'
        self.request.session[session_key] = simplejson.dumps(user_data)

    def get_association_data(self):
        """Pop and return previously saved association data, or None."""
        session_key = self.AUTH_BACKEND_NAME + 'association_data'
        if session_key not in self.request.session:
            return None
        association_data = simplejson.loads(self.request.session[session_key])
        del self.request.session[session_key]
        return association_data

    def unauthorized_token(self):
        """Fetch a request token (first leg of the flow)."""
        token_request = self.oauth_request(token=None,
                                           url=self.REQUEST_TOKEN_URL)
        return Token.from_string(self.fetch_response(token_request))

    def oauth_request(self, token, url, oauth_verifier=None, extra_params=None):
        """Build and sign an OAuth request, wiring in the callback URL."""
        params = {}
        if self.redirect_uri:
            params['oauth_callback'] = self.redirect_uri
        # extra_params, then the verifier, may override earlier entries.
        if extra_params:
            params.update(extra_params)
        if oauth_verifier:
            params['oauth_verifier'] = oauth_verifier
        signed = OAuthRequest.from_consumer_and_token(self.consumer,
                                                      token=token,
                                                      http_url=url,
                                                      parameters=params)
        signed.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, token)
        return signed

    def fetch_response(self, request):
        """Execute *request* and return the service response body."""
        response = urlopen(request.to_url())
        return '\n'.join(response.readlines())

    def access_token(self, token, oauth_verifier):
        """Exchange the authorized request token for an access token."""
        request = self.oauth_request(token, self.ACCESS_TOKEN_URL,
                                     oauth_verifier)
        return Token.from_string(self.fetch_response(request))

    def user_data(self, access_token):
        """Return the user's profile data, or None on an invalid response."""
        request = self.oauth_request(access_token, TWITTER_CHECK_AUTH)
        raw = self.fetch_response(request)
        try:
            return simplejson.loads(raw)
        except ValueError:
            return None

    @property
    def consumer(self):
        """OAuth consumer built from the configured key/secret pair."""
        return OAuthConsumer(*self.get_key_and_secret())

    def get_key_and_secret(self):
        """Return (Consumer Key, Consumer Secret); order *must* be kept."""
        return config.twitter_consumer_key, config.twitter_consumer_secret
D4TI3A/KhaeraTunnisa1144044 | doc/Kuliah/Tugas2.py | Python | gpl-3.0 | 1,425 | 0.006316 | graph = {
'Sarijadi': ['Jl.Surya Sumantri'],
'Jl.Surya Sumantri': ['Pasteur'],
'Pasteur': ['Jl.Dr.Djunjunan'],
'Jl.Dr.Djunjunan': ['Jl.Pajajaran'],
'Jl.Pajajaran': ['Bandara Husain Sastra Negara'],
'Bandara Husain Sastra Negara': ['Bandara Husain Sastra Negara']
}
def mencari_jalur_terpendek(graph, jalanawal, jalantujuan, jalur=None):
    """Return the shortest path from jalanawal to jalantujuan in graph.

    graph maps a node name to the list of its neighbour names.  Returns the
    path as a list of node names, or None when the start node is unknown or
    no path exists.
    """
    # Use a None sentinel instead of a mutable default argument.
    if jalur is None:
        jalur = []
    jalur = jalur + [jalanawal]
    if jalanawal == jalantujuan:
        return jalur
    # dict.has_key() was removed in Python 3; the `in` operator works on both.
    if jalanawal not in graph:
        return None
    jalurpendek = None
    for node in graph[jalanawal]:
        if node not in jalur:  # skip nodes already on the path (no cycles)
            newjalur = mencari_jalur_terpendek(graph, node, jalantujuan, jalur)
            if newjalur:
                if not jalurpendek or len(newjalur) < len(jalurpendek):
                    jalurpendek = newjalur
    return jalurpendek
print("Jalur Jalan Raya Dari Sarijadi Sampai Bandara Husain Sastra Negara Bandung")
print("(Sarijadi, Jl.Surya Sumantri, Pasteur, Jl.Dr.Djunjunan, Jl.Pajajaran, Bandara Husain Sastra Negara)")
print("(Khaera Tunnisa 1144044)")
print("\n")
# raw_input() only exists on Python 2; fall back to input() on Python 3 so
# the script runs under both interpreters.
try:
    _input = raw_input
except NameError:
    _input = input
jalanawal = _input("Masukan jalanawal : ")
jalantujuan = _input("Masukan jalantujuan : ")
hasil = mencari_jalur_terpendek(graph, jalanawal, jalantujuan, jalur=[])
# The Python 2 print statement is a SyntaxError on Python 3; string
# concatenation keeps the output byte-identical on both versions.
print("Jalur Terpendek " + str(hasil))
anzev/hedwig | hedwig/learners/bottomup.py | Python | mit | 2,529 | 0.001186 | '''
Main learner class.
@author: anze.vavpetic@ijs.si
'''
from collections import defaultdict
from hedwig.core import UnaryPredicate, Rule, Example
from hedwig.core.settings import logger
from hedwig.stats.significance import is_redundant
from hedwig.stats.scorefunctions import interesting
class BottomUpLearner:
    '''
    Bottom-up learner.

    Prunes the subclass/superclass structure of the knowledge base by the
    minimum-support threshold at construction time; `induce` is a stub.
    '''
    Similarity = 'similarity'
    Improvement = 'improvement'
    Default = 'default'

    def __init__(self, kb, n=None, min_sup=1, sim=1, depth=4, target=None,
                 use_negations=False):
        """
        :param kb: knowledge base object queried for predicates/members.
        :param n: beam length.
        :param min_sup: minimum number of members a predicate must cover.
        :param depth: maximum number of conjunctions.
        :param target: target class value; defaults to the first class value
            when the target is discrete.
        """
        self.kb = kb
        self.n = n  # Beam length
        self.min_sup = min_sup
        self.sim = sim
        # BUG FIX: the original referenced the undefined name `Learner`,
        # which raised NameError as soon as an instance was created.
        self.extending = BottomUpLearner.Improvement
        self.depth = depth  # Max number of conjunctions
        self.use_negations = use_negations
        if kb.is_discrete_target():
            self.target = list(self.kb.class_values)[0] if not target else target
        else:
            self.target = None
        self.pruned_subclasses = self._pruned_subclasses()
        self.pruned_superclasses_closure = self._pruned_superclasses()
        self.implicit_roots = self._implicit_roots()

    def _pruned_subclasses(self):
        # Keep only subclasses with enough support (>= min_sup members).
        # A real list (not a lazy `filter` object) is stored so the result
        # can be iterated more than once under Python 3.
        pruned_subclasses = {}
        for pred in self.kb.predicates:
            subclasses = self.kb.get_subclasses(pred)
            pruned_subclasses[pred] = [
                sub for sub in subclasses
                if self.kb.n_members(sub) >= self.min_sup
            ]
        return pruned_subclasses

    def _pruned_superclasses(self):
        # Superclass closure restricted to predicates with enough support.
        pruned_superclasses = {}
        for pred in self.kb.predicates:
            superclasses = self.kb.super_classes(pred)
            pruned_superclasses[pred] = [
                sup for sup in superclasses
                if self.kb.n_members(sup) >= self.min_sup
            ]
        return pruned_superclasses

    def _implicit_roots(self):
        # Predicates covering every example behave like (implicit) roots.
        implicit_roots = set()
        n_examples = self.kb.n_examples()
        for pred in self.kb.predicates:
            if self.kb.n_members(pred) == n_examples:
                implicit_roots.add(pred)
        return implicit_roots

    def get_subclasses(self, pred):
        # `pred` is a predicate object; pruning tables are keyed by label.
        return self.pruned_subclasses[pred.label]

    def get_superclasses(self, pred):
        return self.pruned_superclasses_closure[pred]

    def is_implicit_root(self, pred):
        return pred in self.implicit_roots

    def induce(self):
        '''
        Induces rules for the given knowledge base.
        '''
        pass

    def bottom_clause(self):
        pass
JamesChristie/minimax_kata | minimax_kata/interface/char_policy.py | Python | gpl-3.0 | 1,673 | 0.012552 | from minimax_kata.ex | ecutioner import Executione | r
from minimax_kata.interface import WALL_SPACE_CHAR
from minimax_kata.interface import EMPTY_SPACE_CHAR
from minimax_kata.interface import PLAYER_ONE_TRAIL_CHAR
from minimax_kata.interface import PLAYER_TWO_TRAIL_CHAR
from minimax_kata.interface import PLAYER_ONE_CHARS
from minimax_kata.interface import PLAYER_TWO_CHARS
from minimax_kata.interface import CRASH_CHAR
class CharPolicy:
    """Decides which character to draw for a single board cell.

    The cell is addressed by (width, length); the policy inspects the game
    state (collision, player heads, trails) to pick the glyph.
    """

    def __init__(self, game, length, width):
        self.game = game
        self.length = length
        self.width = width
        self.position = (self.width, self.length)
        self.player1 = self.game.player1
        self.player2 = self.game.player2

    def get_char(self):
        """Return the display character for this cell.

        Precedence: crash site, player heads, trails, then empty space.
        """
        if self.__is_collision():
            return CRASH_CHAR
        if self.__is_player1():
            return PLAYER_ONE_CHARS[self.player1.direction]
        if self.__is_player2():
            return PLAYER_TWO_CHARS[self.player2.direction]
        if self.__is_player1_trail():
            return PLAYER_ONE_TRAIL_CHAR
        if self.__is_player2_trail():
            return PLAYER_TWO_TRAIL_CHAR
        return EMPTY_SPACE_CHAR

    # Private

    def __is_collision(self):
        judge = Executioner(self.game)
        if not judge.collision_has_ocurred():
            return False
        return judge.get_collision_position() == (self.width, self.length)

    def __is_player1(self):
        return self.position == self.player1.position

    def __is_player2(self):
        return self.position == self.player2.position

    def __is_player1_trail(self):
        return self.game.get_owner(self.position) is self.player1

    def __is_player2_trail(self):
        return self.game.get_owner(self.position) is self.player2
mdmirabal/Parcial2-Prog3 | main.py | Python | mit | 1,078 | 0.042672 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from kivy.app import App
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import Screen
from precios import Precio
class Ventana(Screen):
    """Main screen: user picks origin/destination, then sees the fare."""

    # BUG FIX: the original assigned the ObjectProperty *class* itself
    # (`precio = ObjectProperty`), so Kivy never registered `precio` as a
    # property and could not bind the widget declared in the .kv file.
    precio = ObjectProperty(None)
    origen = ""
    destino = ""

    def Origen(self, origen):
        """Record the selected origin stop."""
        self.origen = origen
        print(origen)

    def Destino(self, destino):
        """Record the selected destination stop."""
        self.destino = destino
        print(destino)

    def CalcularPrecio(self):
        """Look up the fare for the selected route and show it on screen."""
        if self.origen != "" and self.destino != "":
            costo = "" + Precio(self.origen, self.destino)
            if costo == "null":
                self.precio.text = "No se puede determinar la tarifa para la ruta seleccionada."
            else:
                self.precio.text = "El precio es de aproximadamente $" + costo
        else:
            self.precio.text = "POR FAVOR SELECCIONE UNA RUTA"
class AplicacionApp(App):
    # Kivy application entry point; the root widget is the Ventana screen.
    def build(self):
        # Called by Kivy to construct the root widget tree.
        return Ventana()
    def on_pause(self):
        # Returning True lets the app pause (mobile) instead of stopping.
        return True
if __name__ == '__main__':
AplicacionApp().run()
|
aldian/tensorflow | tensorflow/python/training/tracking/tracking.py | Python | apache-2.0 | 12,398 | 0.005565 | """Dependency tracking for trackable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl import logging
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# global _RESOURCE_TRACKER_STACK
_RESOURCE_TRACKER_STACK = []
class NotTrackable(object):
  """Marks instances of child classes as unsaveable using an object-based API.

  Useful for marking objects which would otherwise look trackable because
  of inheritance (e.g. through `Layer`) as not trackable. Inheriting from
  `NotTrackable` does not prevent an object from being assigned to any
  attributes, but will throw an error on save/restore.
  """
  # Intentionally empty: the class is used purely as a marker type.
  pass
class AutoTrackable(base.Trackable):
  """Manages dependencies on other objects.

  `Trackable` objects may have dependencies: other `Trackable` objects
  which should be saved if the object declaring the dependency is saved. A
  correctly saveable program has a dependency graph such that if changing a
  global variable affects an object (e.g. changes the behavior of any of its
  methods) then there is a chain of dependencies from the influenced object to
  the variable.

  Dependency edges have names, and are created implicitly when a
  `Trackable` object is assigned to an attribute of another
  `Trackable` object. For example:

  ```
  obj = Trackable()
  obj.v = ResourceVariable(0.)
  ```

  The `Trackable` object `obj` now has a dependency named "v" on a
  variable.

  `Trackable` objects may specify `Tensor`s to be saved and restored
  directly (e.g. a `Variable` indicating how to save itself) rather than through
  dependencies on other objects. See
  `Trackable._gather_saveables_for_checkpoint` for details.
  """

  def __setattr__(self, name, value):
    """Support self.foo = trackable syntax."""
    try:
      if getattr(self, name) is value:
        # Short circuit for `self.$x = self.$x`.
        return
    except AttributeError:
      pass
    if getattr(self, "_self_setattr_tracking", True):
      # Record the assignment so that attaching a trackable value creates a
      # dependency edge named after the attribute.
      value = data_structures.sticky_attribute_assignment(
          trackable=self, value=value, name=name)
    super(AutoTrackable, self).__setattr__(name, value)

  def __delattr__(self, name):
    # Make sure tracking state exists, drop the dependency edge for `name`,
    # then delete the attribute itself.
    self._maybe_initialize_trackable()
    delete_tracking(self, name)
    super(AutoTrackable, self).__delattr__(name)

  def _no_dependency(self, value):
    """Override to allow TrackableBase to disable dependency tracking."""
    return data_structures.NoDependency(value)

  def _list_functions_for_serialization(self, unused_serialization_cache):
    """Return a dict of `Function`s of a trackable."""
    functions = {}
    for attribute_name in dir(self):
      # We get the attributes, suppressing warnings and exceptions.
      logging_verbosity = logging.get_verbosity()
      try:
        logging.set_verbosity(logging.FATAL)
        attribute_value = getattr(self, attribute_name, None)
      except Exception:  # pylint: disable=broad-except
        # We really don't want to throw an exception just because some object's
        # attribute accessor is broken.
        attribute_value = None
      finally:
        # We reset the verbosity setting in a `finally` block, to make
        # sure it always happens, even if we make the exception catching above
        # be less broad.
        logging.set_verbosity(logging_verbosity)
      # Only tf.function / concrete-function valued attributes are collected.
      if isinstance(attribute_value, (def_function.Function,
                                      defun.ConcreteFunction)):
        functions[attribute_name] = attribute_value
    return functions
def delete_tracking(obj, name):
  """Removes the tracking of name from object."""
  # pylint: disable=protected-access
  names = obj._unconditional_dependency_names
  deps = obj._unconditional_checkpoint_dependencies
  if name in names:
    del names[name]
    # Drop the first matching (name, value) entry from the dependency list.
    for position, entry in enumerate(deps):
      if entry[0] == name:
        del deps[position]
        break
  # pylint: enable=protected-access
class ResourceTracker(object):
  """An object that tracks a list of resources."""
  __slots__ = ["_resources"]
  def __init__(self):
    # Resources are kept in registration order.
    self._resources = []
  @property
  def resources(self):
    # Note: returns the internal list itself, not a copy.
    return self._resources
  def add_resource(self, resource):
    self._resources.append(resource)
@tf_contextlib.contextmanager
def resource_tracker_scope(resource_tracker):
  """A context to manage resource trackers.

  Use this in order to collect up all resources created within a block of code.
  Example usage:

  ```python
  resource_tracker = ResourceTracker()
  with resource_tracker_scope(resource_tracker):
    resource = TrackableResource()

  assert resource_tracker.resources == [resource]
  ```

  Args:
    resource_tracker: The passed in ResourceTracker object

  Yields:
    A scope in which the resource_tracker is active.
  """
  global _RESOURCE_TRACKER_STACK
  # Snapshot the stack so the `finally` clause can restore it exactly.
  old = list(_RESOURCE_TRACKER_STACK)
  _RESOURCE_TRACKER_STACK.append(resource_tracker)
  try:
    yield
  finally:
    # Rebinding to the pre-scope copy also discards the tracker pushed above.
    _RESOURCE_TRACKER_STACK = old
class CapturableResourceDeleter(object):
  """Deleter to destroy CapturableResource without overriding its __del__()."""

  __slots__ = ["_destruction_context", "_destroy_resource"]

  def __init__(self, destroy_resource_fn=None):
    if not destroy_resource_fn:
      # No destruction callback: nothing to capture, nothing to run later.
      self._destroy_resource = None
      return
    self._destroy_resource = destroy_resource_fn
    # Capture the current execution context so destruction happens in the
    # same mode (eager vs. the graph active at construction time).
    if context.executing_eagerly():
      self._destruction_context = context.eager_mode
    else:
      self._destruction_context = ops.get_default_graph().as_default

  def destroy_resource(self):
    """Run the destruction callback (if any) and return its result."""
    callback = self._destroy_resource
    if callback:
      return callback()
    return None

  def __del__(self):
    if not self._destroy_resource:
      return
    with self._destruction_context():
      self._destroy_resource()
class CapturableResource(base.Trackable):
"""Holds a Tensor which a tf.function can capture.
`CapturableResource`s are discovered by traversing the graph of object
attributes, e.g. during `tf.saved_model.save`. They are excluded from the
scope-base | d tracking of `TrackableResource`; generally things that require
initialization should inherit from `TrackableResource` instead of
`CapturableResource` directly.
"""
def __init__(self, device="", deleter=None):
"""Initialize the `CapturableResource`.
Args:
device: A string indicating a required placement for this resource,
e.g. "CPU" if this resource must be created on a CPU device. A blank
device allows the user to place resource creation, so generally this
should be blank unless the resource only makes sense on one device.
deleter: A CapturableResourceDeleter that will destroy the created
resource during destruction.
"""
self._resource_handle = None
self._resource_device = device
self._resource_deleter = deleter or CapturableResourceDeleter()
def _create_resource(self):
"""A function that creates a resource handle."""
raise NotImplementedError("TrackableResource._create_resource not "
"implemente |
ddimensia/RaceCapture_App | autosportlabs/racecapture/views/configuration/rcp/wirelessconfigview.py | Python | gpl-3.0 | 3,591 | 0.005291 | #
# Race Capture App
#
# Copyright (C) 2014-2016 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.9.1')
from kivy.app import Builder
from kivy.core.window import Window
from kivy.properties import ObjectProperty
from autosportlabs.racecapture.views.configuration.baseconfigview import BaseConfigView
from autosportlabs.widgets.scrollcontainer import ScrollContainer
from kivy.logger import Logger
from autosportlabs.racecapture.views.configuration.rcp.wireless.bluetoothconfigview import BluetoothConfigView
from autosportlabs.racecapture.views.configuration.rcp.wireless.cellularconfigview import CellularConfigView
from autosportlabs.racecapture.views.configuration.rcp.wireless.wificonfigview import WifiConfigView
WIRELESS_CONFIG_VIEW_KV = 'autosportlabs/racecapture/views/configuration/rcp/wirelessconfigview.kv'
class WirelessConfigView(BaseConfigView):
    """Aggregates the Bluetooth / WiFi / cellular configuration panels.

    A panel is created only when the connected unit reports the matching
    capability, or when capabilities are unknown (show everything).
    """
    Builder.load_file(WIRELESS_CONFIG_VIEW_KV)

    def __init__(self, base_dir, config, capabilities, **kwargs):
        super(WirelessConfigView, self).__init__(**kwargs)
        self.register_event_type('on_config_updated')
        self.register_event_type('on_config_modified')
        self.base_dir = base_dir
        self.rcp_capabilities = capabilities
        self.rcp_config = config
        self._views = []
        self._render()
        self._attach_event_handlers()

    def _render(self):
        """Instantiate the per-technology panels and add them to the view."""
        capabilities = self.rcp_capabilities
        if not capabilities or capabilities.has_bluetooth:
            panel = BluetoothConfigView(self.rcp_config)
            self.ids.wireless_settings.add_widget(panel, index=0)
            self._views.append(panel)
        if not capabilities or capabilities.has_wifi:
            panel = WifiConfigView(self.base_dir, self.rcp_config)
            self.ids.wireless_settings.add_widget(panel, index=0)
            self._views.append(panel)
        if not capabilities or capabilities.has_cellular:
            panel = CellularConfigView(self.base_dir, self.rcp_config)
            self.ids.wireless_settings.add_widget(panel, index=0)
            self._views.append(panel)

    def _attach_event_handlers(self):
        # Bubble any child panel modification up as 'on_config_modified'.
        for panel in self._views:
            panel.bind(on_modified=self._on_views_modified)

    def on_config_updated(self, config):
        # Keep the new config and push it into every child panel.
        self.rcp_config = config
        self._update_view_configs()

    def _update_view_configs(self):
        for panel in self._views:
            panel.config_updated(self.rcp_config)

    def _on_views_modified(self, *args):
        Logger.debug("WirelessConfigView: _on_views_modified args: {}".format(args))
        Logger.debug("Got view modified")
        self.dispatch('on_config_modified')

    def on_config_modified(self, *args):
        pass
codilime/cloudify-rest-client | cloudify_rest_client/blueprints.py | Python | apache-2.0 | 7,791 | 0 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import tempfile
import shutil
import tarfile
import urllib
import urlparse
import contextlib
from os.path import expanduser
from cloudify_rest_client import bytes_stream_utils
from cloudify_rest_client.responses import ListResponse
class Blueprint(dict):
    # Dict-like wrapper over a blueprint resource returned by the REST API;
    # the interesting keys are exposed as read-only properties and a
    # missing key yields None.

    def __init__(self, blueprint):
        self.update(blueprint)

    @property
    def id(self):
        """
        :return: The identifier of the blueprint.
        """
        return self.get('id')

    @property
    def created_at(self):
        """
        :return: Timestamp of blueprint creation.
        """
        return self.get('created_at')

    @property
    def main_file_name(self):
        """
        :return: Blueprint main file name.
        """
        return self.get('main_file_name')

    @property
    def plan(self):
        """
        Gets the plan the blueprint represents: nodes, relationships etc...

        :return: The content of the blueprint.
        """
        return self.get('plan')

    @property
    def description(self):
        """
        Gets the description of the blueprint

        :return: The description of the blueprint.
        """
        return self.get('description')
class BlueprintsClient(object):
    """Client for the /blueprints endpoints of the Cloudify REST service."""

    def __init__(self, api):
        self.api = api

    @staticmethod
    def _tar_blueprint(blueprint_path, tempdir):
        """Pack the directory containing *blueprint_path* into a .tar.gz.

        :param blueprint_path: Path of the main blueprint yaml file.
        :param tempdir: Directory in which the archive is created.
        :return: Path of the created archive.
        """
        blueprint_path = expanduser(blueprint_path)
        blueprint_name = os.path.basename(os.path.splitext(blueprint_path)[0])
        # A bare file name means the blueprint lives in the current directory.
        blueprint_directory = os.path.dirname(blueprint_path) or os.getcwd()
        tar_path = '{0}/{1}.tar.gz'.format(tempdir, blueprint_name)
        with tarfile.open(tar_path, "w:gz") as tar:
            tar.add(blueprint_directory,
                    arcname=os.path.basename(blueprint_directory))
        return tar_path

    def _upload(self, archive_location,
                blueprint_id,
                application_file_name=None):
        """PUT the archive (URL or streamed local file) to the manager."""
        query_params = {}
        if application_file_name is not None:
            query_params['application_file_name'] = \
                urllib.quote(application_file_name)
        uri = '/blueprints/{0}'.format(blueprint_id)
        # For a Windows path (e.g. "C:\aaa\bbb.zip") the scheme is the
        # drive letter, hence the additional on-disk existence check.
        is_url = bool(urlparse.urlparse(archive_location).scheme) and \
            not os.path.exists(archive_location)
        if is_url:
            # Let the manager download the archive itself.
            query_params['blueprint_archive_url'] = archive_location
            data = None
        else:
            # Local path - upload the file content in chunks.
            data = bytes_stream_utils.request_data_file_stream_gen(
                archive_location)
        return self.api.put(uri, params=query_params, data=data,
                            expected_status_code=201)

    def list(self, _include=None, **kwargs):
        """
        Returns a list of currently stored blueprints.

        :param _include: List of fields to include in response.
        :param kwargs: Optional filter fields. For a list of available fields
               see the REST service's models.BlueprintState.fields
        :return: Blueprints list.
        """
        response = self.api.get('/blueprints',
                                _include=_include,
                                params=kwargs)
        blueprints = [Blueprint(item) for item in response['items']]
        return ListResponse(blueprints, response['metadata'])

    def publish_archive(self, archive_location, blueprint_id,
                        blueprint_filename=None):
        """
        Publishes a blueprint archive to the Cloudify manager.

        :param archive_location: Path or Url to the archive file.
        :param blueprint_id: Id of the uploaded blueprint.
        :param blueprint_filename: The archive's main blueprint yaml filename;
               when None the REST service falls back to its default.
        :return: Created blueprint.

        The archive file should contain a single directory holding a
        blueprint file named `blueprint_filename`.
        """
        raw_blueprint = self._upload(
            archive_location,
            blueprint_id=blueprint_id,
            application_file_name=blueprint_filename)
        return Blueprint(raw_blueprint)

    def upload(self, blueprint_path, blueprint_id):
        """
        Uploads a blueprint to Cloudify's manager.

        :param blueprint_path: Main blueprint yaml file path.
        :param blueprint_id: Id of the uploaded blueprint.
        :return: Created blueprint.

        The folder containing the main yaml file is packed into a temporary
        archive which is then uploaded to the manager.
        """
        tempdir = tempfile.mkdtemp()
        try:
            tar_path = self._tar_blueprint(blueprint_path, tempdir)
            application_file = os.path.basename(blueprint_path)
            raw_blueprint = self._upload(
                tar_path,
                blueprint_id=blueprint_id,
                application_file_name=application_file)
            return Blueprint(raw_blueprint)
        finally:
            # Always remove the temporary archive directory.
            shutil.rmtree(tempdir)

    def get(self, blueprint_id, _include=None):
        """
        Gets a blueprint by its id.

        :param blueprint_id: Blueprint's id to get.
        :param _include: List of fields to include in response.
        :return: The blueprint.
        """
        assert blueprint_id
        uri = '/blueprints/{0}'.format(blueprint_id)
        return Blueprint(self.api.get(uri, _include=_include))

    def delete(self, blueprint_id):
        """
        Deletes the blueprint whose id matches the provided blueprint id.

        :param blueprint_id: The id of the blueprint to be deleted.
        :return: Deleted blueprint.
        """
        assert blueprint_id
        uri = '/blueprints/{0}'.format(blueprint_id)
        return Blueprint(self.api.delete(uri))

    def download(self, blueprint_id, output_file=None):
        """
        Downloads a previously uploaded blueprint from Cloudify's manager.

        :param blueprint_id: The Id of the blueprint to be downloaded.
        :param output_file: The file path of the downloaded blueprint file
         (optional)
        :return: The file path of the downloaded blueprint.
        """
        uri = '/blueprints/{0}/archive'.format(blueprint_id)
        with contextlib.closing(
                self.api.get(uri, stream=True)) as response_stream:
            return bytes_stream_utils.write_response_stream_to_file(
                response_stream, output_file)
papedaniel/oioioi | oioioi/forum/views.py | Python | gpl-3.0 | 11,557 | 0.000865 | from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, get_object_or_404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
from oioioi.base.menu import menu_registry
from oioioi.base.permissions import enforce_condition, not_anonymous
from oioioi.base.utils.confirmation import confirmation_view
from oioioi.contests.utils import contest_exists, can_enter_contest, \
is_contest_admin
from oioioi.contests.menu import contest_admin_menu_registry
from oioioi.forum.models import Category
from oioioi.forum.forms import PostForm, NewThreadForm
from oioioi.forum.utils import forum_exists_and_visible, is_proper_forum, \
is_not_locked, get_forum_ct, get_forum_ctp, get_msgs, forum_is_locked
# registering forum
@menu_registry.register_decorator(_("Forum"), lambda request:
        reverse('forum', kwargs={'contest_id': request.contest.id}),
        order=500)
@contest_admin_menu_registry.register_decorator(_("Forum"), lambda request:
        reverse('oioioiadmin:forum_forum_change',
                args=(request.contest.forum.id,)),
        order=50)
@enforce_condition(contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum)
def forum_view(request):
    """Renders the forum front page with all categories and their threads."""
    msgs = get_msgs(request)
    # Prefetch threads and their posts so the template does not fire one
    # query per category/thread.
    category_set = request.contest.forum.category_set \
        .prefetch_related('thread_set', 'thread_set__post_set') \
        .all()
    return TemplateResponse(request, 'forum/forum.html', {
        'forum': request.contest.forum, 'msgs': msgs,
        'is_locked': forum_is_locked(request), 'category_set': category_set
    })
@enforce_condition(contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum)
def category_view(request, category_id):
    """Renders one forum category with its list of threads."""
    category = get_object_or_404(Category, id=category_id)
    msgs = get_msgs(request)
    # Prefetch posts and the last post's author to avoid per-thread queries.
    threads = category.thread_set \
        .prefetch_related('post_set') \
        .select_related('last_post', 'last_post__author') \
        .all()
    return TemplateResponse(request, 'forum/category.html',
        {'forum': request.contest.forum, 'category': category,
         'threads': threads, 'msgs': msgs,
         'is_locked': forum_is_locked(request)})
@enforce_condition(contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum)
def thread_view(request, category_id, thread_id):
    """Renders a thread; users allowed to post also get a reply form."""
    category, thread = get_forum_ct(category_id, thread_id)
    forum, lock = request.contest.forum, forum_is_locked(request)
    msgs = get_msgs(request)
    post_set = thread.post_set.select_related('author').all()
    # Admins may always post; others only when logged in and not locked.
    can_post = (request.user.is_authenticated() and not lock) or \
        is_contest_admin(request)
    if not can_post:
        # Read-only rendering: no reply form in the template context.
        return TemplateResponse(request, 'forum/thread.html',
            {'forum': forum, 'category': category, 'thread': thread,
             'msgs': msgs, 'is_locked': lock, 'post_set': post_set})
    if request.method == "POST":
        form = PostForm(request, request.POST)
        if form.is_valid():
            new_post = form.save(commit=False)
            new_post.author = request.user
            new_post.thread = thread
            new_post.add_date = request.timestamp
            new_post.save()
            return redirect('forum_thread', contest_id=request.contest.id,
                            category_id=category.id,
                            thread_id=thread.id)
    else:
        form = PostForm(request)
    # Invalid POSTs fall through here so the form is re-shown with errors.
    return TemplateResponse(request, 'forum/thread.html',
        {'forum': forum, 'category': category, 'thread': thread,
         'form': form, 'msgs': msgs, 'is_locked': lock,
         'post_set': post_set})
@enforce_condition(not_anonymous & contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum & is_not_locked)
def thread_add_view(request, category_id):
    """Creates a new thread (with its first post) in the given category."""
    category = get_object_or_404(Category, id=category_id)
    msgs = get_msgs(request)
    if request.method == 'POST':
        form = NewThreadForm(request, request.POST)
        if form.is_valid():  # adding the new thread
            new_thread = form.save(commit=False)
            new_thread.category = category
            new_thread.save()
            post_form = PostForm(request, request.POST)
            if post_form.is_valid():  # adding the new post
                first_post = post_form.save(commit=False)
                first_post.author = request.user
                first_post.thread = new_thread
                first_post.add_date = request.timestamp
                first_post.save()
            return redirect('forum_thread', contest_id=request.contest.id,
                            category_id=category.id,
                            thread_id=new_thread.id)
    else:
        form = NewThreadForm(request)
    return TemplateResponse(request, 'forum/thread_add.html',
        {'forum': request.contest.forum, 'category': category,
         'form': form, 'msgs': msgs})
@enforce_condition(not_anonymous & contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum & is_not_locked)
def edit_post_view(request, category_id, thread_id, post_id):
    """Lets a post's author (or a contest admin) edit the post."""
    (category, thread, post) = get_forum_ctp(category_id, thread_id, post_id)
    msgs = get_msgs(request)
    is_admin = is_contest_admin(request)
    # Only the author or an admin may edit.
    if post.author != request.user and not is_admin:
        raise PermissionDenied
    if request.method == 'POST':
        form = PostForm(request, request.POST, instance=post)
        if form.is_valid():
            instance = form.save(commit=False)
            # Record when the post was last edited.
            instance.last_edit_date = request.timestamp
            instance.save()
            return redirect('forum_thread', contest_id=request.contest.id,
                            category_id=category.id,
                            thread_id=thread.id)
    else:
        form = PostForm(request, instance=post)
    return TemplateResponse(request, 'forum/edit_post.html',
        {'forum': request.contest.forum, 'category': category,
         'thread': thread, 'form': form, 'post': post, 'msgs': msgs})
@enforce_condition(not_anonymous & contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum & is_not_locked)
def delete_post_view(request, category_id, thread_id, post_id):
    """Deletes a post after confirmation; admins may delete any post,
    authors only their latest post while it can still be removed."""
    (category, thread, post) = get_forum_ctp(category_id, thread_id, post_id)
    is_admin = is_contest_admin(request)
    if not is_admin and \
        (post.author != request.user or
         (post.author == request.user and
          (thread.post_set.filter(add_date__gt=post.add_date).exists() or
           not post.can_be_removed()))):
        # author: if there are other posts added later or timedelta is gt 15min
        # if user is not the author of the post or forum admin
        raise PermissionDenied
    else:
        choice = confirmation_view(request, 'forum/confirm_delete.html',
                                   {'elem': post})
        # A non-bool result is the confirmation page itself - render it.
        if not isinstance(choice, bool):
            return choice
        if choice:
            post.delete()
            # Removing the last post of a thread removes the thread too.
            if not thread.post_set.exists():
                thread.delete()
                return redirect('forum_category',
                                contest_id=request.contest.id,
                                category_id=category.id)
        return redirect('forum_thread', contest_id=request.contest.id,
                        category_id=category.id, thread_id=thread.id)
@enforce_condition(not_anonymous & contest_exists & can_enter_contest)
@enforce_condition(forum_exists_and_visible & is_proper_forum)
@require_POST
def report_post_view(request, category_id, thread_id, post_id):
    """Flags a post for moderator attention, then returns to the thread."""
    category, thread, post = get_forum_ctp(category_id, thread_id, post_id)
    post.reported = True
    post.save()
    return redirect('forum_thread', contest_id=request.contest.id,
                    category_id=category.id, thread_id=thread.id)
@enforce_condition(contest_exists & is_contest_admin)
@enforce_condition(forum_exists_and_ |
xandmaga/migracao_py-upsert | postgres-list-table.py | Python | mit | 6,300 | 0.012857 | import psycopg2
import collections
import collections.abc
import json
import sys
def consulta(query):
    # Run *query* and return all result rows.
    # NOTE(review): relies on the module-level `cursor` that is bound near
    # the bottom of this script — confirm the intended connection is active.
    cursor.execute(query)
    return cursor.fetchall()
def constroi_consulta_lista(lista_tabelas):
    """Build the pg_constraint query listing all tables referenced via
    foreign keys by any table in *lista_tabelas*.

    NOTE(review): table names are interpolated directly into the SQL, so
    only trusted, hard-coded names may be passed.

    :param lista_tabelas: list of table names.
    :return: SQL query string.
    """
    # join() produces "'a','b'" directly, replacing the original trailing
    # comma hack that patched ",)" back to ")" across the whole query text.
    tabelas = ",".join("'" + tabela + "'" for tabela in lista_tabelas)
    query = "SELECT distinct cl2.relname AS ref_table FROM pg_constraint as co JOIN pg_class AS cl1 ON co.conrelid=cl1.oid JOIN pg_class AS cl2 ON co.confrelid=cl2.oid WHERE co.contype='f' AND cl1.relname in (" + tabelas + ") AND cl2.relname <> cl1.relname ORDER BY cl2.relname"
    return query
def constroi_consulta(tabela):
    """Build the pg_constraint query listing the tables referenced via
    foreign keys by the single table *tabela*."""
    quoted = "'{0}'".format(tabela)
    return ("SELECT distinct cl2.relname AS ref_table FROM pg_constraint as co JOIN pg_class AS cl1 ON co.conrelid=cl1.oid JOIN pg_class AS cl2 ON co.confrelid=cl2.oid WHERE co.contype='f' AND cl1.relname = " + quoted + " AND cl2.relname <> cl1.relname ORDER BY cl2.relname")
def convert_tupla_lista(lista_tupla):
    """Collapse a list of row tuples to a flat list of their first columns.

    :param lista_tupla: list of tuples (e.g. cursor.fetchall() rows).
    :return: new list with each tuple's first element.
    """
    # Comprehension replaces the original quadratic `lista + [x]` loop.
    return [tupla[0] for tupla in lista_tupla]
def consulta_tabelas_dependentes_lista(lista_tabelas, lista_tabelas_resultado):
    """Recursively accumulate, into *lista_tabelas_resultado*, every table
    reachable from *lista_tabelas* through foreign-key references.

    The accumulator list is mutated in place and also returned.
    """
    # Empty (or None) input terminates the recursion.
    if not lista_tabelas:
        return lista_tabelas_resultado
    dependentes = convert_tupla_lista(consulta(constroi_consulta_lista(lista_tabelas=lista_tabelas)))
    # Plain loop instead of the original side-effecting list comprehension
    # (which built a throwaway list of Nones).  Track only the NEW tables:
    # recursing on previously seen tables (as the original did) loops
    # forever when the schema contains circular foreign keys.
    novas = []
    for tabela in dependentes:
        if tabela not in lista_tabelas_resultado:
            lista_tabelas_resultado.append(tabela)
            novas.append(tabela)
    return consulta_tabelas_dependentes_lista(lista_tabelas=novas,
                                              lista_tabelas_resultado=lista_tabelas_resultado)
def le_arquivo_json(filename):
    """Read a JSON-lines file (one JSON document per line).

    Double backslashes are collapsed to single ones before parsing — an
    artifact of how the dump files were produced.

    :param filename: path to the dump file.
    :return: list of parsed objects, in file order.
    """
    print(filename)
    lista = []
    # "with" guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as f:
        for row in f:
            lista.append(json.loads(row.replace('\\\\', '\\')))
    return lista
class OrderedSet(collections.abc.MutableSet):
    """Set that remembers insertion order (Raymond Hettinger's recipe).

    Backed by a circular doubly linked list (``self.end`` is the sentinel)
    plus ``self.map`` mapping each key to its ``[key, prev, next]`` node,
    so add/discard/membership are O(1) and iteration follows insertion
    order.

    Fixed: inherit from ``collections.abc.MutableSet`` — the old
    ``collections.MutableSet`` alias was removed in Python 3.10.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}            # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable     # MutableSet.__ior__ calls self.add per item

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        if key not in self.map:
            # Splice a new node in just before the sentinel (i.e. at the end).
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def discard(self, key):
        if key in self.map:
            # Unlink the node; neighbours are re-joined.
            key, prev, next = self.map.pop(key)
            prev[2] = next
            next[1] = prev

    def __iter__(self):
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]

    def __reversed__(self):
        end = self.end
        curr = end[1]
        while curr is not end:
            yield curr[0]
            curr = curr[1]

    def pop(self, last=True):
        """Remove and return the last (or first) element in order."""
        if not self:
            raise KeyError('set is empty')
        key = self.end[1][0] if last else self.end[2][0]
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order matters only when comparing two OrderedSets.
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)
'''
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_classe_judicial"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_assunto_trf"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_competencia"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_orgao_julgador"], lista_tabelas_resultado=lista_tabelas_resultado))
lista_tabelas_resultado = []
print(consulta_tabelas_dependentes_lista(lista_tabelas=["tb_jurisdicao"], lista_tabelas_resultado=lista_tabelas_resultado))
'''
from upsert import Upsert
import traceback
def migra_tabela(tabela):
    """Upsert every row of *tabela* from its JSON dump files.

    Reads ``<tabela>_ids.json`` (primary-key columns per row) and
    ``<tabela>.json`` (remaining columns per row) and applies one upsert
    per row pair.

    :return: True on success, False if any exception occurred (the
        traceback is printed to stdout).
    """
    try:
        json_id_tabela = le_arquivo_json(tabela + "_ids.json")
        json_tabela = le_arquivo_json(tabela + ".json")
        upsert = Upsert(cursor, tabela)
        # Fail loudly (-> False) if the two dumps are out of sync rather
        # than silently truncating to the shorter one.
        if len(json_id_tabela) != len(json_tabela):
            raise ValueError("row count mismatch between %s_ids.json and %s.json"
                             % (tabela, tabela))
        for ids, valores in zip(json_id_tabela, json_tabela):
            upsert.row(ids, valores)
        return True
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C still interrupts.
        traceback.print_exc(file=sys.stdout)
        return False
def desabilita_triggers(tabela):
    # Disable ALL triggers on the table (including FK constraint triggers)
    # so rows can be bulk-loaded out of referential order.
    # NOTE(review): `tabela` is interpolated into the SQL — trusted names only.
    cursor.execute("ALTER TABLE " + tabela + " DISABLE TRIGGER ALL;")
def habilita_triggers(tabela):
    # Re-enable all triggers after a bulk load (counterpart of
    # desabilita_triggers).  `tabela` must be a trusted name.
    cursor.execute("ALTER TABLE " + tabela + " ENABLE TRIGGER ALL;")
def migra_linha():
    # One-off smoke test: upsert a single hard-coded row into tb_endereco.
    upsert = Upsert(cursor, "tb_endereco" )
    upsert.row({'id_endereco': 100054} , {'nm_logradouro': "RUA HERACLITO", 'id_cep': 365878})
def migra_tabelas(lista_tabelas):
    """Migrate each table in order; return the list of per-table success flags."""
    resultados = []
    for tabela in lista_tabelas:
        resultados.append(migra_tabela(tabela))
    return resultados
# --- Script entry: pick a connection, then run the migration. ---------------
# Restored two lines that were corrupted by stray " | " separator artifacts.
# NOTE(review): credentials are hard-coded in plain text — move to env/config.
''' Conexao pjesup
pjesupconn = psycopg2.connect("dbname=pje user=pjeadmin password=pj3adm1n-TJMG host=linbdpje-5 port=5432")
pjesupcursor = pjesupconn.cursor()
cursor = pjesupcursor
'''
#Conexao pjetst
pjetstconn = psycopg2.connect("dbname=pje user=pjeadmin password=pj3adm1n-TJMG host=linbdpje-10 port=5432")
pjetstcursor = pjetstconn.cursor()
cursor = pjetstcursor
''' conexao pjetstcasa
pje_local_conn = psycopg2.connect("dbname=pje user=postgres password=123456 host=localhost port=5432")
pje_local_cursor = pje_local_conn.cursor()
cursor = pje_local_cursor
'''
''' # conexao pjetstlocal
pje_tstlocal_conn = psycopg2.connect("dbname=pjetst user=postgres password=Postgres1234 host=localhost port=5432")
pje_tstlocal_cursor = pje_tstlocal_conn.cursor()
cursor = pje_tstlocal_cursor
'''
# Defer constraint checks so tables can be loaded in reverse dependency order.
cursor.execute("set search_path = public, acl, core, client, criminal, jt; SET CONSTRAINTS ALL DEFERRED;")
lista_tabelas = ['tb_classe_judicial','tb_assunto_trf','tb_competencia','tb_orgao_julgador']
lista_tabelas.reverse()
migra_tabelas(lista_tabelas)
#migra_tabela("tb_classe_judicial")
|
buckinha/gravity | deprecated/test_script_optimizer2.py | Python | mpl-2.0 | 1,197 | 0.016708 | from FireGirlOptimizer import *
FGPO = FireGirlPolicyOptimizer()
###To create, uncomment the following two lines:
FGPO.createFireGirlPathways(10,50)
#FGPO.saveFireGirlPathways("FG_pathways_20x50.fgl")
###To load (already created data), uncomment the following line
#FGPO.loadFireGirlPathways("FG_pathways_20x50.fgl")
#Setting Flags
FGPO.NORMALIZED_WEIGHTS_OBJ_FN = False
FGPO.NORMALIZED_WEI | GHTS_F_PRIME = False
FGPO.AVERAGED_WEIGHTS_OBJ_FN = True
FGPO.AVERAGED_WEIGHTS_F_PRIME = True
print(" ")
print("Initial Values")
print("objfn: " + str(FGPO.calcObjFn()))
print("fprme: " + str(FGPO.calcObjFPrime()))
print("weights: " + str(FGPO.pathway_weights))
print("net values: " + str(FGPO.pathway_net_values))
#setting new policy
b = [0,0,0,0,0,0,0,0,0,0,0]
pol = FireGirlPolicy(b)
FGPO.setPolicy(pol)
print(" ")
###To Optimize, uncomment | the following
print("Beginning Optimization Routine")
FGPO.USE_AVE_PROB = False
output=FGPO.optimizePolicy()
FGPO.printOptOutput(output)
print(" ")
print("Final Values")
print("objfn: " + str(FGPO.calcObjFn()))
print("fprme: " + str(FGPO.calcObjFPrime()))
print("weights: " + str(FGPO.pathway_weights))
print("net values: " + str(FGPO.pathway_net_values)) |
amolenaar/gaphor | gaphor/RAAML/fta/basicevent.py | Python | lgpl-2.1 | 1,914 | 0.000522 | """Basic Event item definition."""
from gaphas.geometry import Rectangle
from gaphas.util import path_ellipse
from gaphor.core.modeling import DrawContext
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, IconBox, Text, stroke
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontStyle, FontWeight
from gaphor.RAAML import raaml
from gaphor.RAAML.fta.constants import DEFAULT_FTA_MAJOR
from gaphor.UML.modelfactory import stereotypes_str
@represents(raaml.BasicEvent)
class BasicEventItem(ElementPresentation, Classified):
    """Diagram item for a RAAML FTA Basic Event.

    Rendered as an ellipse with the stereotype, the element name in bold
    and the owning package name underneath.

    Fixed: restored two lines corrupted by stray " | " separator artifacts
    (the second watch() expression and the update_shapes signature).
    """

    def __init__(self, diagram, id=None):
        super().__init__(diagram, id, width=DEFAULT_FTA_MAJOR, height=DEFAULT_FTA_MAJOR)
        # Re-render whenever the element is renamed or moved between packages.
        self.watch("subject[NamedElement].name").watch(
            "subject[NamedElement].namespace.name"
        )

    def update_shapes(self, event=None):
        self.shape = IconBox(
            Box(
                draw=draw_basic_event,
            ),
            Text(
                text=lambda: stereotypes_str(self.subject, ["BasicEvent"]),
            ),
            Text(
                text=lambda: self.subject.name or "",
                width=lambda: self.width - 4,
                style={
                    "font-weight": FontWeight.BOLD,
                    "font-style": FontStyle.NORMAL,
                },
            ),
            Text(
                text=lambda: from_package_str(self),
                style={"font-size": "x-small"},
            ),
        )
def draw_basic_event(box, context: DrawContext, bounding_box: Rectangle):
    """Draw the Basic Event symbol: an ellipse filling the bounding box."""
    cr = context.cairo
    # Position the current point before tracing the ellipse path.
    cr.move_to(bounding_box.width, bounding_box.height)
    path_ellipse(
        cr,
        bounding_box.width / 2.0,
        bounding_box.height / 2.0,
        bounding_box.width,
        bounding_box.height,
    )
    stroke(context)
|
yuchou/xblog | blog/templatetags/__init__.py | Python | mit | 91 | 0.010989 | #!/u | sr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: yuchou
@time: 2017/8/7 10:28
""" |
PascualArroyo/Domotics | Raspberry/rele.py | Python | gpl-2.0 | 1,058 | 0.038752 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import deviceConfig
import time
class Rele:
#Rele
valueRele = 0
def __init__(self):
| GPIO.setup(deviceConfig.pinRele, GPIO.OUT)
GPIO.setup(devi | ceConfig.pinReleLed, GPIO.OUT)
GPIO.setup(deviceConfig.pinReleButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(deviceConfig.pinReleButton, GPIO.FALLING, callback=self.buttonReleCallback, bouncetime=500)
def getValue(self):
return self.valueRele
def setValue(self, value):
self.valueRele = int(value)
GPIO.output(deviceConfig.pinRele, self.valueRele)
GPIO.output(deviceConfig.pinReleLed, self.valueRele)
return self.valueRele
#Button rele
def buttonReleCallback(self, channel):
print "Rele"
time.sleep(0.2)
if GPIO.input(channel) == GPIO.LOW:
print "Rele confirmacion"
if self.valueRele == 0:
self.valueRele = 1
else:
self.valueRele = 0
GPIO.output(deviceConfig.pinRele, self.valueRele)
GPIO.output(deviceConfig.pinReleLed, self.valueRele)
|
SKIRT/PTS | magic/tests/base.py | Python | agpl-3.0 | 22,419 | 0.003167 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import ABCMeta, abstractmethod
import math
import numpy as np
# Import astronomical modules
from astropy.modeling.models import Gaussian2D, AiryDisk2D
from photutils.datasets import make_noise_image
# Import the relevant PTS classes and modules
from pts.core.basics.log import log
from pts.do.commandline import Command
from pts.core.test.implementation import TestImplementation
from pts.magic.core.frame import Frame
from pts.core.tools import filesystem as fs
from pts.magic.core.dataset import DataSet
from pts.core.units.parsing import parse_unit as u
from pts.modeling.tests.base import m81_data_path
from pts.magic.catalog.fetcher import CatalogFetcher
from pts.magic.basics.coordinate import SkyCoordinate
from pts.core.tools import stringify
from pts.magic.tools import wavelengths
from pts.magic.tools import fitting, statistics
from pts.magic.convolution.kernels import has_variable_fwhm, get_fwhm
from pts.magic.basics.vector import Pixel
from pts.magic.basics.coordinate import PixelCoordinate
from pts.magic.core.list import CoordinateSystemList
# -----------------------------------------------------------------
description = "Test the source detection and extraction"
# -----------------------------------------------------------------
# For M81
fwhms = {"2MASS H": 4.640929858306589 * u("arcsec"),
"2MASS J": 4.580828087551186 * u("arcsec"),
"2MASS Ks": 4.662813601376219 * u("arcsec"),
"SDSS g": 2.015917936060279 * u("arcsec"),
"SDSS i": 1.85631074608032 * u("arcsec"),
"SDSS r": 2.026862297071852 * u("arcsec"),
"SDSS u": 2.327165667182196 * u("arcsec"),
"SDSS z": 1.841443699129355 * u("arcsec")}
# -----------------------------------------------------------------
# Determine the path to the headers directory
headers_path = fs.join(m81_data_path, "headers")
# -----------------------------------------------------------------
class SourcesTestBase(TestImplementation):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(S | ourcesTestBase, self).__init__(*args, **kwargs)
# The remote
self.remote = None
# Paths
self.data_path = None
self.data_frames_path = None
self.data_masks_path = None
self.find_path = None
self.find_paths = dict()
self.extract_p | ath = None
self.extract_paths = dict()
# The coordinate systems
self.coordinate_systems = CoordinateSystemList()
# The frames
self.frames = dict()
# The dataset
self.dataset = None
# The catalog fetcher
self.fetcher = CatalogFetcher()
# Catalogs
self.point_source_catalog = None
# The real FWHMs
self.real_fwhms = dict()
# The source finder
self.finder = None
# The source extractors
self.extractors = dict()
# -----------------------------------------------------------------
    def setup(self, **kwargs):
        """
        Run the base-class setup, then create the output directory tree
        (data/{frames,masks}, find, extract) under the test path.
        :param kwargs:
        :return:
        """
        # Call the setup function of the base class
        super(SourcesTestBase, self).setup(**kwargs)
        # Set paths
        self.data_path = fs.create_directory_in(self.path, "data")
        self.data_frames_path = fs.create_directory_in(self.data_path, "frames")
        self.data_masks_path = fs.create_directory_in(self.data_path, "masks")
        self.find_path = fs.create_directory_in(self.path, "find")
        self.extract_path = fs.create_directory_in(self.path, "extract")
# -----------------------------------------------------------------
def initialize_frames(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Initializing the frames ...")
# Loop over the filters
for fltr in self.coordinate_systems.filters:
# Debugging
log.debug("Initializing the '" + str(fltr) + "' frame ...")
# Get the wcs
wcs = self.coordinate_systems[fltr]
# Create new frame
frame = Frame.zeros(wcs.shape)
# Add the wcs
frame.wcs = wcs
# Set the filter
frame.filter = fltr
# Set the unit
frame.unit = "Jy"
# Add the frame
self.frames[fltr] = frame
# -----------------------------------------------------------------
def set_fwhms(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the FWHMs ...")
# Loop over the filters
for fltr in self.frames:
# Get the fwhm
if has_variable_fwhm(fltr): fwhm = fwhms[str(fltr)]
else: fwhm = get_fwhm(fltr)
# Debugging
log.debug("The FWHM of the '" + str(fltr) + "' image is " + stringify.stringify(fwhm)[1])
# Set
self.real_fwhms[fltr] = fwhm
# -----------------------------------------------------------------
@property
def star_filters(self):
"""
This function ...
:return:
"""
filters = []
# Loop over the filters
for fltr in self.frames:
# Get wavelength
wavelength = fltr.effective if fltr.effective is not None else fltr.center
# Check
if wavelength > wavelengths.ranges.ir.mir.max: continue
filters.append(fltr)
# Return the filters
return filters
# -----------------------------------------------------------------
@property
def extra_filters(self):
"""
This function ...
:return:
"""
filters = []
# Loop over the filters
for fltr in self.frames:
# Get wavelength
wavelength = fltr.effective if fltr.effective is not None else fltr.center
# Check
if wavelength < wavelengths.ranges.ir.mir.max: continue
filters.append(fltr)
# Return the filters
return filters
# -----------------------------------------------------------------
def create_random_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating random point sources ...")
# Generate random coordinates
right_ascensions = np.random.uniform(self.coordinate_systems.min_ra_deg, self.coordinate_systems.max_ra_deg, size=self.config.nrandom_sources)
declinations = np.random.uniform(self.coordinate_systems.min_dec_deg, self.coordinate_systems.max_dec_deg, size=self.config.nrandom_sources)
# Loop over the coordinates
for ra, dec in zip(right_ascensions, declinations):
# Create a sky coordinate
coordinate = SkyCoordinate(ra=ra, dec=dec, unit="deg")
# Add to the point source catalog
self.point_source_catalog.add_coordinate(coordinate)
# -----------------------------------------------------------------
def make_point_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Making point sources ...")
# Call the appropriate function
if self.config. |
dave-shawley/vetoes | tests/feature_flag_tests.py | Python | bsd-3-clause | 751 | 0 | import unittest
import helper.config
import mock
from vetoes import config
class FeatureFlagMixinTests(unittest.TestCase):
    """Tests for vetoes.config.FeatureFlagMixin feature-flag parsing.

    Fixed: restored the 'features' dict key and closing braces, which were
    corrupted by stray " | " separator artifacts.
    """

    def test_that_flags_are_processed_during_initialize(self):
        # 'on' parses truthy, 'false' parses falsy.
        settings = helper.config.Data({
            'features': {'on': 'on', 'off': 'false'}
        })
        consumer = config.FeatureFlagMixin(settings, mock.Mock())
        self.assertTrue(consumer.feature_flags['on'])
        self.assertFalse(consumer.feature_flags['off'])

    def test_that_invalid_flags_arg_ignored(self):
        # Unparseable flag values are silently dropped.
        settings = helper.config.Data({
            'features': {'one': 'not valid', 'two': None}
        })
        consumer = config.FeatureFlagMixin(settings, mock.Mock())
        self.assertEqual(consumer.feature_flags, {})
|
odrotleff/ROOTPWA | pyInterface/package/utils/_parseUtils.py | Python | gpl-3.0 | 3,290 | 0.029787 |
import glob
import os
import sys
import pyRootPwa
import pyRootPwa.utils
def parseMassBinArgs(allMassBins, massBinArg):
    """Translate the mass-bin command line option into a list of mass bins.

    *massBinArg* may be "all", a single 1-based index ("3"), a range
    ("2-5"), a comma separated list ("1,3") or a mix thereof ("1,3-5").

    Fixed: restored two lines corrupted by stray " | " separator artifacts.

    :return: list of selected entries from *allMassBins*; [] for
        unparsable input.  Exits the program on out-of-range indices.
    """
    massBins = []
    if massBinArg == "all":
        massBins = allMassBins
    elif ("-" in massBinArg) or ("," in massBinArg):
        rawMassBinIndices = massBinArg.split(",")
        massBinIndices = []
        for massBinIndex in rawMassBinIndices:
            if "-" in massBinIndex:
                (lb, tmp, ub) = massBinIndex.partition("-")
                try:
                    lb = int(lb)
                    ub = int(ub)
                except ValueError:
                    return []
                for i in range(lb, ub+1):
                    massBinIndices.append(i)
            else:
                try:
                    mbi = int(massBinIndex)
                except ValueError:
                    return []
                massBinIndices.append(mbi)
        # NOTE(review): indices are 1-based; "0" maps to the LAST bin via
        # Python's -1 indexing — confirm whether that should be rejected.
        for index in massBinIndices:
            try:
                massBins.append(allMassBins[index-1])
            except IndexError:
                pyRootPwa.utils.printErr("Mass bin command line option out of range. Aborting...")
                sys.exit(1)
    else:
        try:
            mbi = int(massBinArg)
            massBins.append(allMassBins[mbi-1])
        except ValueError:
            return []
        except IndexError:
            pyRootPwa.utils.printErr("Mass bin command line option out of range. Aborting...")
            sys.exit(1)
    return massBins
def _evtOrRoot(filename):
if os.path.isfile(filename + ".root"):
filename += ".root"
elif os.path.isfile(filename + ".evt"):
filename += ".evt"
else:
filename = ""
return filename
def getListOfInputFiles(massBins):
    """For each mass-bin directory, collect the data, phase-space and
    acceptance-corrected phase-space event files (".root" preferred over
    ".evt"), honouring the extension qualifiers from pyRootPwa.config.

    :param massBins: list of mass-bin directory paths.
    :return: (inputDataFiles, inputPSFiles, inputAccPSFiles) lists.
    """
    inputDataFiles = []
    inputPSFiles = []
    inputAccPSFiles = []
    dataFileExtensionQualifier = pyRootPwa.config.dataFileExtensionQualifier
    phaseSpaceEventFileExtensionQualifier = pyRootPwa.config.phaseSpaceEventFileExtensionQualifier
    accCorrPSEventFileExtensionQualifier = pyRootPwa.config.accCorrPSEventFileExtensionQualifier
    for massBin in massBins:
        # "<dir>/<dirname>" is the common stem of all event files in a bin
        inputFile = massBin + "/" + massBin.rsplit('/', 1)[-1]
        if dataFileExtensionQualifier != "":
            inputFile += "." + dataFileExtensionQualifier
        inputFile = _evtOrRoot(inputFile)
        if inputFile:
            inputDataFiles.append(inputFile)
        else:
            # NOTE(review): at this point inputFile is "", so the message
            # prints an empty file name — probably meant the pre-_evtOrRoot
            # stem instead.
            pyRootPwa.utils.printWarn('Mass bin "' + massBin + '" does not contain data input file "' + inputFile + '{.root/.evt}".')
        # Phase-space and acc.-corrected files are optional: only looked up
        # when their extension qualifier is configured, and silently skipped
        # when absent.
        if phaseSpaceEventFileExtensionQualifier != "":
            inputFile = massBin + "/" + massBin.rsplit('/', 1)[-1] + "." + phaseSpaceEventFileExtensionQualifier
            inputFile = _evtOrRoot(inputFile)
            if inputFile:
                inputPSFiles.append(inputFile)
        if accCorrPSEventFileExtensionQualifier != "":
            inputFile = massBin + "/" + massBin.rsplit('/', 1)[-1] + "." + accCorrPSEventFileExtensionQualifier
            inputFile = _evtOrRoot(inputFile)
            if inputFile:
                inputAccPSFiles.append(inputFile)
    return (inputDataFiles, inputPSFiles, inputAccPSFiles)
def getListOfKeyfiles(keyfilePatterns):
    """Expand each pattern (directory, single file, or glob) into a flat
    list of existing ".key" files; non-key glob matches are warned about
    and skipped."""
    keyfiles = []
    for pattern in keyfilePatterns:
        pattern = os.path.expanduser(os.path.expandvars(pattern))
        if os.path.isdir(pattern):
            keyfiles.extend(glob.glob(pattern + "/*.key"))
        elif os.path.isfile(pattern) and pattern.endswith(".key"):
            keyfiles.append(pattern)
        else:
            for candidate in glob.glob(pattern):
                if os.path.isfile(candidate) and candidate.endswith(".key"):
                    keyfiles.append(candidate)
                else:
                    pyRootPwa.utils.printWarn("Keyfile " + candidate + " is not valid. Skipping...")
    return keyfiles
|
technige/py2neo | py2neo/cypher/queries.py | Python | apache-2.0 | 7,877 | 0.002158 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from py2neo.cypher import cypher_join, cypher_escape, cypher_repr, CypherExpression
def unwind_create_nodes_query(data, labels=None, keys=None):
    """ Generate a parameterised ``UNWIND...CREATE`` query for bulk
    loading nodes into Neo4j.

    :param data: iterable of node records — dicts when *keys* is None,
        otherwise value lists ordered like *keys*
    :param labels: labels to stamp on every created node
    :param keys: property names matching the value lists in *data*
    :return: (query, parameters) tuple
    """
    return cypher_join("UNWIND $data AS r",
                       _create_clause("_", (tuple(labels or ()),)),
                       _set_properties_clause("r", keys),
                       data=list(data))
def unwind_merge_nodes_query(data, merge_key, labels=None, keys=None, preserve=None):
    """ Generate a parameterised ``UNWIND...MERGE`` query for bulk
    loading nodes into Neo4j.

    :param data: iterable of node records
    :param merge_key: label/property spec used to match existing nodes
    :param labels: labels applied to every node after the merge
    :param keys: property names when the records are value lists
    :param preserve:
        Collection of key names for values that should be protected
        should the node already exist.
    :return: (query, parameters) tuple
    """
    return cypher_join("UNWIND $data AS r",
                       _merge_clause("_", merge_key, "r", keys),
                       _on_create_set_properties_clause("r", keys, preserve),
                       _set_properties_clause("r", keys, exclude_keys=preserve),
                       _set_labels_clause(labels),
                       data=list(data))
def unwind_create_relationships_query(data, rel_type, start_node_key=None, end_node_key=None,
                                      keys=None):
    """ Generate a parameterised ``UNWIND...CREATE`` query for bulk
    loading relationships into Neo4j.

    :param data: iterable of (start_node, properties, end_node) triples
    :param rel_type: relationship type for every created relationship
    :param start_node_key: node key spec for matching start nodes
        (None matches by internal node id)
    :param end_node_key: node key spec for matching end nodes
    :param keys: property names when the property records are value lists
    :return: (query, parameters) tuple
    """
    return cypher_join("UNWIND $data AS r",
                       _match_clause("a", start_node_key, "r[0]"),
                       _match_clause("b", end_node_key, "r[2]"),
                       _create_clause("_", rel_type, "(a)-[", "]->(b)"),
                       _set_properties_clause("r[1]", keys),
                       data=_relationship_data(data))
def unwind_merge_relationships_query(data, merge_key, start_node_key=None, end_node_key=None,
                                     keys=None, preserve=None):
    """ Generate a parameterised ``UNWIND...MERGE`` query for bulk
    loading relationships into Neo4j.

    :param data: iterable of (start_node, properties, end_node) triples
    :param merge_key: tuple of (rel_type, key1, key2...)
    :param start_node_key: node key spec for matching start nodes
    :param end_node_key: node key spec for matching end nodes
    :param keys: property names when the property records are value lists
    :param preserve:
        Collection of key names for values that should be protected
        should the relationship already exist.
    :return: (query, parameters) tuple
    """
    return cypher_join("UNWIND $data AS r",
                       _match_clause("a", start_node_key, "r[0]"),
                       _match_clause("b", end_node_key, "r[2]"),
                       _merge_clause("_", merge_key, "r[1]", keys, "(a)-[", "]->(b)"),
                       _on_create_set_properties_clause("r[1]", keys, preserve),
                       _set_properties_clause("r[1]", keys, exclude_keys=preserve),
                       data=_relationship_data(data))
class NodeKey(object):
    """Normalised view of a node key spec: either label(s) alone, or a
    tuple (labels, key1, key2, ...) where each key may itself be a tuple
    of property names (flattened)."""

    def __init__(self, node_key):
        if isinstance(node_key, tuple):
            # First element: label(s); remaining elements: property keys,
            # flattening any nested tuples of keys.
            self.__pl = node_key[0]
            self.__pk = ()
            for pk in node_key[1:]:
                if isinstance(pk, tuple):
                    self.__pk += pk
                else:
                    self.__pk += (pk,)
        else:
            self.__pl, self.__pk = node_key, ()
        # Labels are always kept as a tuple; a falsy label becomes "".
        if not isinstance(self.__pl, tuple):
            self.__pl = (self.__pl or "",)

    def label_string(self):
        # ":`A`:`B`" — labels de-duplicated and sorted for deterministic text
        label_set = set(self.__pl)
        return "".join(":" + cypher_escape(label) for label in sorted(label_set))

    def keys(self):
        return self.__pk

    def key_value_string(self, value, ix):
        # "`k1`:value[i1], `k2`:value[i2]" — each key paired with its index
        # expression taken from *ix*.
        return ", ".join("%s:%s[%s]" % (cypher_escape(key), value, cypher_repr(ix[i]))
                         for i, key in enumerate(self.__pk))
def _create_clause(name, node_key, prefix="(", suffix=")"):
    # "CREATE (name:Label1:Label2)" — labels come from the node_key spec.
    return "CREATE %s%s%s%s" % (prefix, name, NodeKey(node_key).label_string(), suffix)
def _match_clause(name, node_key, value, prefix="(", suffix=")"):
    """Build a MATCH clause binding *name*: by internal node id when
    *node_key* is None, otherwise by label(s) plus key properties whose
    values are indexed out of *value*."""
    if node_key is None:
        # ... add MATCH by id clause
        return "MATCH %s%s%s WHERE id(%s) = %s" % (prefix, name, suffix, name, value)
    else:
        # ... add MATCH by label/property clause
        nk = NodeKey(node_key)
        n_pk = len(nk.keys())
        if n_pk == 0:
            return "MATCH %s%s%s%s" % (
                prefix, name, nk.label_string(), suffix)
        elif n_pk == 1:
            return "MATCH %s%s%s {%s:%s}%s" % (
                prefix, name, nk.label_string(), cypher_escape(nk.keys()[0]), value, suffix)
        else:
            return "MATCH %s%s%s {%s}%s" % (
                prefix, name, nk.label_string(), nk.key_value_string(value, list(range(n_pk))),
                suffix)
def _merge_clause(name, merge_key, value, keys, prefix="(", suffix=")"):
    """Build a MERGE clause for *name* using the label(s) and key
    properties of *merge_key*.  When *keys* is None the data rows are
    dicts and values are indexed by key name; otherwise rows are lists and
    each merge key is looked up by its position in *keys*."""
    nk = NodeKey(merge_key)
    merge_keys = nk.keys()
    if len(merge_keys) == 0:
        return "MERGE %s%s%s%s" % (
            prefix, name, nk.label_string(), suffix)
    elif keys is None:
        return "MERGE %s%s%s {%s}%s" % (
            prefix, name, nk.label_string(), nk.key_value_string(value, merge_keys), suffix)
    else:
        return "MERGE %s%s%s {%s}%s" % (
            prefix, name, nk.label_string(), nk.key_value_string(value, [keys.index(key) for key in
                                                                         merge_keys]), suffix)
def _set_labels_clause(labels):
    # "SET _:Label1:Label2", or no clause at all when there are no labels.
    if labels:
        return "SET _%s" % NodeKey((tuple(labels),)).label_string()
    else:
        return None
def _set_properties_clause(expr, keys, exclude_keys=()):
    """Build the "SET _ += {...}" clause copying properties from *expr*,
    skipping any key listed in *exclude_keys*."""
    if keys is None:
        # data is list of dicts
        return "SET _ += %s" % expr
    else:
        # data is list of lists
        d = OrderedDict()
        for i, key in enumerate(keys):
            if exclude_keys and key in exclude_keys:
                continue
            d[key] = CypherExpression("%s[%d]" % (expr, i))
        return "SET _ += " + cypher_repr(d)
def _on_create_set_properties_clause(expr, all_keys, keys):
    """Build the "ON CREATE SET _ += {...}" clause for the protected keys
    in *keys*; only applicable when rows are lists (all_keys given)."""
    if keys is None:
        return None
    else:
        # data is list of lists
        d = OrderedDict()
        for i, key in enumerate(all_keys):
            if key in keys:
                d[key] = CypherExpression("%s[%d]" % (expr, i))
        return "ON CREATE SET _ += " + cypher_repr(d)
def _relationship_data(data):
norm_data = []
for item in data:
start_node, detail, end_node = item
norm_start = type(start_node) is tuple and len(start_node) == 1
norm_end = type(end_node) is tuple and len(end_node) == 1
if norm_start and norm_end:
norm_data.append((start_node[0], detail, end_node[0]))
elif norm_start:
norm_data.append((start_node[0], detail, end_node))
elif norm_end:
norm_data.append((start_node, detail, end_node[0]))
else:
norm_data.append(item)
return norm_data
|
ssaavedra/couchdb-python | couchdb/mapping.py | Python | bsd-3-clause | 22,390 | 0.001385 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Mapping from raw JSON data structures to Python objects and vice versa.
>>> from couchdb import Server
>>> server = Server()
>>> db = server.create('python-tests')
To define a document mapping, you declare a Python class inherited from
`Document`, and add any number of `Field` attributes:
>>> from datetime import datetime
>>> from couchdb.mapping import Document, TextField, IntegerField, DateTimeField
>>> class Person(Document):
... name = TextField()
... age = IntegerField()
... added = DateTimeField(default=datetime.now)
>>> person = Person(name='John Doe', age=42)
>>> person.store(db) #doctest: +ELLIPSIS
<Person ...>
>>> person.age
42
You can then load the data from the CouchDB server through your `Document`
subclass, and conveniently access all attributes:
>>> person = Person.load(db, person.id)
>>> old_rev = person.rev
>>> person.name
u'John Doe'
>>> person.age
42
>>> person.added #doctest: +ELLIPSIS
datetime.datetime(...)
To update a document, simply set the attributes, and then call the ``store()``
method:
>>> person.name = 'John R. Doe'
>>> person.store(db) #doctest: +ELLIPSIS
<Person ...>
If you retrieve the document from the server again, you should be getting the
updated data:
>>> person = Person.load(db, person.id)
>>> person.name
u'John R. Doe'
>>> person.rev != old_rev
True
>>> del server['python-tests']
"""
import copy
from calendar import timegm
from datetime import date, datetime, time
from decimal import Decimal
from time import strptime, struct_time
from couchdb.design import ViewDefinition
__all__ = ['Mapping', 'Document', 'Field', 'TextField', 'FloatField',
'IntegerField', 'LongField', 'BooleanField', 'DecimalField',
'DateField', 'DateTimeField', 'TimeField', 'DictField', 'ListField',
'ViewField']
__docformat__ = 'restructuredtext en'
DEFAULT = object()
class Field(object):
    """Basic unit for mapping a piece of data between Python and JSON.

    Instances of this class can be added to subclasses of `Document` to describe
    the mapping of a document.  Acts as a data descriptor reading and
    writing the owning instance's ``_data`` dict.
    """

    def __init__(self, name=None, default=None):
        self.name = name
        self.default = default

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        value = instance._data.get(self.name)
        if value is not None:
            value = self._to_python(value)
        elif self.default is not None:
            # Callables are invoked lazily, so e.g. datetime.now is fresh
            # at every access.
            default = self.default
            if callable(default):
                default = default()
            value = default
        return value

    def __set__(self, instance, value):
        if value is not None:
            value = self._to_json(value)
        instance._data[self.name] = value

    def _to_python(self, value):
        # Subclasses override; the base field treats values as text.
        # (Python 2 code: `unicode` is the py2 builtin.)
        return unicode(value)

    def _to_json(self, value):
        return self._to_python(value)
class MappingMeta(type):
    """Metaclass that collects `Field` attributes (inherited and own) into
    a ``_fields`` dict on every new `Mapping` subclass."""

    def __new__(cls, name, bases, d):
        fields = {}
        # Inherit field definitions from base classes first ...
        for base in bases:
            if hasattr(base, '_fields'):
                fields.update(base._fields)
        # ... then let this class's own Field attributes override them,
        # defaulting each field's JSON name to its attribute name.
        for attrname, attrval in d.items():
            if isinstance(attrval, Field):
                if not attrval.name:
                    attrval.name = attrname
                fields[attrname] = attrval
        d['_fields'] = fields
        return type.__new__(cls, name, bases, d)
class Mapping(object):
    """Base class for struct-like objects: `Field` descriptors declared on
    a subclass are collected by `MappingMeta` and backed by the raw
    ``_data`` dict (the JSON representation)."""
    __metaclass__ = MappingMeta

    def __init__(self, **values):
        self._data = {}
        # Assign given values; fields not supplied are re-assigned their
        # current (default) value so it gets normalised into _data too.
        for attrname, field in self._fields.items():
            if attrname in values:
                setattr(self, attrname, values.pop(attrname))
            else:
                setattr(self, attrname, getattr(self, attrname))

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data or ())

    def __delitem__(self, name):
        del self._data[name]

    def __getitem__(self, name):
        return self._data[name]

    def __setitem__(self, name, value):
        self._data[name] = value

    def get(self, name, default=None):
        return self._data.get(name, default)

    def setdefault(self, name, default):
        return self._data.setdefault(name, default)

    def unwrap(self):
        # Expose the raw JSON-compatible dict.
        return self._data

    @classmethod
    def build(cls, **d):
        # Dynamically build an anonymous Mapping subclass from Field kwargs.
        fields = {}
        for attrname, attrval in d.items():
            if not attrval.name:
                attrval.name = attrname
            fields[attrname] = attrval
        d['_fields'] = fields
        return type('AnonymousStruct', (cls,), d)

    @classmethod
    def wrap(cls, data):
        # Wrap an existing dict without copying it.
        instance = cls()
        instance._data = data
        return instance

    def _to_python(self, value):
        return self.wrap(value)

    def _to_json(self, value):
        return self.unwrap()
class ViewField(object):
r"""Descriptor that can be used to bind a view definition to a property of
a `Document` class.
>>> class Person(Document):
... name = TextField()
... age = IntegerField()
... by_name = ViewField('people', '''\
... function(doc) {
... emit(doc.name, doc);
... }''')
>>> Person.by_name
<ViewDefinition '_design/people/_view/by_name'>
>>> print Person.by_name.map_fun
function(doc) {
emit(doc.name, doc);
}
That property can be used as a function, which will execute the view.
>>> from couchdb import Database
>>> db = Database('python-tests')
>>> Person.by_name(db, count=3)
<ViewResults <PermanentView '_design/people/_view/by_name'> {'count': 3}>
The results produced by the view are automatically wrapped in the
`Document` subclass the descriptor is bound to. In this example, it would
return instances of the `Person` class. But please note that this requires
the values of the view results to be dictionaries that can be mapped to the
mapping defined by the containing `Document` class. Alternatively, the
``include_docs`` query option can be used to inline the actual documents in
the view results, which will then be used instead of the values.
If you use Python view functions, this class can also be used as a
decorator:
>>> class Person(Document):
... name = TextField()
... age = IntegerField()
...
... @ViewField.define('people')
... def by_name(doc):
... yield doc['name'], doc
>>> Person.by_name
<ViewDefinition '_design/people/_view/by_name'>
>>> print Person.by_name.map_fun
def by_name(doc):
yield doc['name'], doc
"""
    def __init__(self, design, map_fun, reduce_fun=None, name=None,
                 language='javascript', wrapper=DEFAULT, **defaults):
        """Initialize the view descriptor.
        :param design: the name of the design document
        :param map_fun: the map function code
        :param reduce_fun: the reduce function code (optional)
        :param name: the actual name of the view in the design document, if
                     it differs from the name the descriptor is assigned to
        :param language: the name of the language used
        :param wrapper: an optional callable that should be used to wrap the
                        result rows
        :param defaults: default query string parameters to apply
        """
        # `wrapper` defaults to the module-level DEFAULT sentinel (defined
        # elsewhere in this module) so an explicit None can be told apart
        # from "not given".
        self.design = design
        self.name = name
        self.map_fun = map_fun
        self.reduce_fun = reduce_fun
        self.language = language
        self.wrapper = wrapper
        self.defaults = defaults
@classmethod
def define(cls, design, name=None, language='python', wrapper=DEFAULT,
**defaults):
"""Factory method for use as a decorator (only suitable for Python
view code).
"""
def view_wrapped(fun):
return cls(design, fun, |
tudennis/LeetCode---kamyu104-11-24-2015 | Python/plus-one-linked-list.py | Python | mit | 1,560 | 0 | # Time: O(n)
# Space: O(1)
# Definition fo | r singly-linked list.
class ListNode(object):
    """A singly linked list node (here each node holds one decimal digit,
    as used by the plus-one solutions below)."""
    def __init__(self, x):
        self.val = x   # value stored in this node
        self.next = None  # next node, or None at the tail
# | Two pointers solution.
class Solution(object):
    def plusOne(self, head):
        """
        Add one to a non-negative integer stored as a linked list of
        digits (most significant digit first) and return the new head.

        Single pass with two pointers: ``left`` trails at the last non-9
        digit seen before the tail, so the carry can be applied there and
        every trailing 9 rolled over to 0.

        :type head: ListNode
        :rtype: ListNode
        """
        if not head:
            return None
        dummy = ListNode(0)
        dummy.next = head
        left, right = dummy, head
        # Walk to the tail; `left` remembers the last non-9 node before it.
        while right.next:
            if right.val != 9:
                left = right
            right = right.next
        if right.val != 9:
            # Tail digit is not 9: plain increment, no carry.
            right.val += 1
        else:
            # Tail is 9: bump the last non-9 digit (or the dummy when the
            # whole number is 9s) and zero out everything after it.
            left.val += 1
            right = left.next
            while right:
                right.val = 0
                right = right.next
        # A non-zero dummy means the carry produced a new leading digit.
        return dummy if dummy.val else dummy.next
# Time: O(n)
# Space: O(1)
class Solution2(object):
    def plusOne(self, head):
        """
        Add one to a number stored as a digit list (most significant digit
        first): reverse the list, propagate the carry from the new head,
        then reverse back.

        :type head: ListNode
        :rtype: ListNode
        """
        def reverseList(head):
            # Standard in-place reversal via a dummy head.
            dummy = ListNode(0)
            curr = head
            while curr:
                dummy.next, curr.next, curr = curr, dummy.next, curr.next
            return dummy.next
        rev_head = reverseList(head)
        curr, carry = rev_head, 1
        while curr and carry:
            curr.val += carry
            # Fixed: use floor division so digit arithmetic stays integral
            # on both Python 2 and Python 3 (plain `/` yields a float on 3,
            # corrupting the digit values).
            carry = curr.val // 10
            curr.val %= 10
            if carry and curr.next is None:
                # Carry out of the most significant digit: grow the number.
                curr.next = ListNode(0)
            curr = curr.next
        return reverseList(rev_head)
|
Robpol86/Flask-Large-Application-Example | tests/core/test_email.py | Python | mit | 1,330 | 0.001504 | from datetime import timedelta
import time
from pypi_portal.core.email import send_email, send_exception
from pypi_portal.extensions import mail, redis
def raise_and_send():
    """Raise a dummy ValueError and report it via send_exception().

    Returns the list of messages captured by the mail extension's
    record_messages() outbox (empty when nothing was sent).
    """
    with mail.record_messages() as outbox:
        try:
            raise ValueError('Fake error.')
        except ValueError:
            send_exception('Test Email10')
    return outbox
def test_send_email():
with mail.re | cord_messages() as outbox:
send_email('Test Email', 'Message body.')
assert 1 == len(outbox)
assert 'Test Email' == outbox[0].subject
with mail.record_messages() as outbox:
send_email('Test Email2', 'Message bo | dy.', throttle=1)
send_email('Test Email2', 'Message body.', throttle=timedelta(seconds=1))
send_email('Test Email9', 'Message body.', throttle=1)
time.sleep(1.1)
send_email('Test Email2', 'Message body.', throttle=1)
assert 3 == len(outbox)
assert ['Test Email2', 'Test Email9', 'Test Email2'] == [o.subject for o in outbox]
def test_send_exception():
    # Start from a clean Redis so no previously stored state interferes.
    redis.flushdb()
    # First occurrence: one email is sent, containing the traceback markup.
    outbox = raise_and_send()
    assert 1 == len(outbox)
    assert 'Application Error: Test Email10' == outbox[0].subject
    assert '<blockquote ' in outbox[0].html
    assert 'Fake error.' in outbox[0].html
    # An identical exception immediately after sends nothing (apparently
    # suppressed/throttled by send_exception -- confirmed by the empty outbox).
    outbox = raise_and_send()
    assert 0 == len(outbox)
|
shuhaowu/projecto | projecto/utils.py | Python | apache-2.0 | 5,402 | 0.01666 | import errno
import os
from flask import current_app, abort
import ujson
import werkzeug.utils
def safe_mkdirs(path):
  """Create directory *path* if it does not already exist.

  Safe against creation races: if another thread/greenlet/process creates
  the directory between our existence check and the mkdir call, the
  resulting EEXIST error is swallowed as long as a directory is really
  there; any other OSError propagates.
  """
  if os.path.exists(path):
    return
  try:
    os.mkdir(path)
  except OSError as e:
    # Re-raise unless this is a benign lost race (the dir now exists).
    if e.errno != errno.EEXIST or not os.path.isdir(path):
      raise
# ujson for both speed and compactness
def jsonify(**params):
  """Serialize the keyword arguments with ujson and return a Flask
  response carrying the application/json mimetype."""
  response = current_app.make_response(ujson.dumps(params))
  response.mimetype = "application/json"
  return response
# Project access control helpers
from functools import wraps
from kvkit import NotFoundError
from flask.ext.lo | gin import current_user
from .models import Project, User
def hook_user_to_projects(user):
for email in user.emails:
for project in Project.index("unregistered_owners", email):
project.unregistered_owners.remove(email)
project.owners.append(user.key)
project.save()
for project in | Project.index("unregistered_collaborators", email):
project.unregistered_collaborators.remove(email)
project.collaborators.append(user.key)
project.save()
def project_access_required(fn):
  """This will allow anyone who is currently registered in that project to
  access the project. Denying the rest. It requires a project_id. It will also
  resolve a project and pass that instance into the function as oppose to just
  passing project_id. Furthermore, if an user is found to be in unregistered,
  he will be moved into owner or participant list.
  This is a fairly heavy operation for now. We will probably need to speed it
  up in the future.
  """
  # NOTE(review): despite the docstring, this wrapper does not migrate
  # unregistered users itself -- that is done by hook_user_to_projects above.
  @wraps(fn)
  def wrapped(*args, **kwargs):
    # `project_id` is consumed here; the wrapped view receives the resolved
    # `project` instance instead.
    project_id = kwargs.pop("project_id")
    if project_id is None:
      raise ValueError("Project_id is required! This is probably a programming error.")
    if not current_user.is_authenticated():
      return abort(403)
    try:
      project = Project.get(project_id)
    except NotFoundError:
      return abort(404)
    # Grant access when any of the current user's email addresses belongs
    # to a registered owner or collaborator of this project.
    for email in current_user.emails:
      for userkey in project.owners:
        if email in User.get(userkey).emails:
          return fn(project=project, *args, **kwargs)
      for userkey in project.collaborators:
        if email in User.get(userkey).emails:
          return fn(project=project, *args, **kwargs)
    return abort(403)
  return wrapped
def project_managers_required(fn):
  """Like project_access_required, but only project *owners* are allowed
  through; collaborators get a 403. Pops `project_id` from kwargs and
  passes the resolved `project` instance to the wrapped view.
  """
  @wraps(fn)
  def wrapped(*args, **kwargs):
    project_id = kwargs.pop("project_id")
    if project_id is None:
      raise ValueError("Project_id is required! This is probably a programming error.")
    if not current_user.is_authenticated():
      return abort(403)
    try:
      project = Project.get(project_id)
    except NotFoundError:
      return abort(404)
    # Only owners count as managers: collaborators are not checked here.
    for email in current_user.emails:
      for userkey in project.owners:
        if email in User.get(userkey).emails:
          return fn(project=project, *args, **kwargs)
    return abort(403)
  return wrapped
from flask import request
from kvkit import ValidationError
def ensure_good_request(required_parameters, accepted_parameters=None, allow_json_none=False):
  """Ensure that the request is good. aborts with 400 otherwise.
  accepted_parameters and required_parameters are both sets. If accepted_parameters is None,
  it is then the same as required_parameters. len(required_parameters) <= len(accepted_parameters)
  """
  if accepted_parameters is None:
    accepted_parameters = required_parameters
  def decorator(f):
    @wraps(f)
    def fn(*args, **kwargs):
      if request.json:
        # Cheap size bounds check before the subset tests below.
        if len(request.json) > len(accepted_parameters) or len(request.json) < len(required_parameters):
          return abort(400)
        parameters_provided = set(request.json.keys())
        # Enforce required <= provided <= accepted via set comparisons.
        if not (parameters_provided >= required_parameters) or not (parameters_provided <= accepted_parameters):
          return abort(400)
      else:
        # No JSON body at all; reject unless the caller explicitly allows it.
        if not allow_json_none:
          return abort(400)
      try:
        return f(*args, **kwargs)
      except ValidationError:
        # Model validation failures from kvkit surface as plain 400s.
        return abort(400)
    return fn
  return decorator
# Helper for markdown
import misaka
MARKDOWN_EXTENSIONS = misaka.EXT_FENCED_CODE | misaka.EXT_STRIKETHROUGH
HTML_FLAGS = misaka.HTML_ESCAPE | misaka.HTML_SMARTYPANTS | misaka.HTML_SAFELINK
def markdown_to_html(s):
  """Render markdown *s* to HTML via misaka, with raw HTML escaped
  (see HTML_ESCAPE in HTML_FLAGS above)."""
  return misaka.html(s, MARKDOWN_EXTENSIONS, HTML_FLAGS)
def markdown_to_db(s):
  """Return the dict persisted to the db: the raw markdown plus its HTML."""
  return {"markdown": s, "html": markdown_to_html(s)}
def parse_path(path, project_key):
  """Parses a path that is passed from the client securely.
  Returns a filesystem path as well as a key for db and if it is a directory.
  """
  # Fixed several crashes in the original: `os.join` (no such attribute),
  # appending "/" to the *list* of components, and concatenating that list
  # into the key string. The components are now rejoined into a string.
  path = path.lstrip("/")
  # A trailing slash marks a directory request.
  is_directory = path.endswith("/")
  # SECURITY: sanitize each component; drop traversal attempts and empties.
  components = [werkzeug.utils.secure_filename(p.strip())
                for p in path.split("/") if p.strip() not in ("..", ".", "")]
  rel_path = "/".join(components)
  if is_directory:
    rel_path += "/"
  fspath = os.path.join(current_app.config["APP_FOLDER"], project_key, rel_path)
  key = project_key + "`/" + rel_path
  return key, fspath, is_directory
def secure_path(path):
  """Sanitize a relative filesystem path received from outside.

  Each component is run through werkzeug's secure_filename and empty
  results are dropped, so traversal sequences cannot survive. Returns the
  rejoined relative path, or "" when nothing survives sanitizing.
  """
  # SECURITY: please review
  path = path.lstrip(os.path.sep)
  temp = []
  for p in path.split(os.path.sep):
    p = werkzeug.utils.secure_filename(p)
    if p:
      temp.append(p)
  # Fixed: os.path.join(*[]) raises TypeError; map an all-filtered path to "".
  if not temp:
    return ""
  return os.path.join(*temp)
|
Goodly/TextThresher | thresher_backend/storage.py | Python | apache-2.0 | 446 | 0.002242 | from django.contrib.staticfiles import storage |
# Configure the permissions used by ./manage.py collectstatic
# See https://docs.djangoproject.com/en/1.10/ref/contrib/staticfiles/
class TTStaticFilesStorage(storage.StaticFilesStorage):
def __init__(self, *args, **kwargs):
kwargs['file_permissions_mode | '] = 0o644
kwargs['directory_permissions_mode'] = 0o755
super(TTStaticFilesStorage, self).__init__(*args, **kwargs)
|
CodingRobots/CodingRobots | robots/examples/Ninja.py | Python | gpl-3.0 | 2,216 | 0.000903 | from robot import Robot
class TheRobot(Robot):
def initialize(self):
# Try to get in to a corner
self.forseconds(5, self.force, 50)
self.forseconds(0.9, self.force, -10)
self.forseconds(0.7, self.torque, 100)
self.forseconds(6, self.force, 50)
# Then look around and shoot stuff
self.forever(self.scanfire)
self | ._turretdirect | ion = 1
self.turret(180)
self._pingfoundrobot = None
    def scanfire(self):
        """Sweep the turret, ping for targets, and fire at robots.

        Registered via self.forever() in initialize, so this runs every
        tick. The angle of the last robot seen is remembered in
        self._pingfoundrobot so that bearing can be re-checked; otherwise
        the turret sweeps between 90 and 180 degrees, with
        self._turretdirection selecting the sweep direction
        (1 = toward 180, 0 = toward 90).
        """
        self.ping()
        sensors = self.sensors
        kind, angle, dist = sensors['PING']
        tur = sensors['TUR']
        if self._pingfoundrobot is not None:
            # has pinged a robot previously
            if angle == self._pingfoundrobot:
                # This is where we saw the robot previously
                if kind in 'rb':
                    # Something is still there, so shoot it
                    self.fire()
                else:
                    # No robot there now
                    self._pingfoundrobot = None
            elif kind == 'r':
                # This is not where we saw something before,
                # but there is a robot at this location also
                self.fire()
                self._pingfoundrobot = angle
                self.turret(angle)
            else:
                # No robot where we just pinged. So go back to
                # where we saw a robot before.
                self.turret(self._pingfoundrobot)
        elif kind == 'r':
            # pinged a robot
            # shoot it
            self.fire()
            # keep the turret here, and see if we can get it again
            self._pingfoundrobot = angle
            self.turret(angle)
        else:
            # No robot pinged, and have not seen a robot yet
            # so move the turret and keep looking
            if self._turretdirection == 1:
                if tur < 180:
                    self.turret(180)
                else:
                    self._turretdirection = 0
            elif self._turretdirection == 0:
                if tur > 90:
                    self.turret(90)
                else:
                    self._turretdirection = 1
|
AmandaMoen/AmandaMoen | students/KarlGentner/list_lab.py | Python | gpl-2.0 | 4,184 | 0.001195 | #!/usr/bin/python
import sys
import copy
# Create fruitlist
fruitlist = [u"Apples", u"Pears", u"Oranges", u"Peaches"]
# Display all fruit - helper method
def displayAllFruit(fruitlist):
    """Print the fruit list as one comma-separated line, framed by blank lines."""
    sys.stdout.write("\nHere is the current list of fruit:\n")
    sys.stdout.write(", ".join(fruitlist))
    sys.stdout.write("\n\n")
# isInt - validate whether user input string is an integer
def isInt(userInput):
    """Return True if userInput converts cleanly with int(), else False.

    Fixed: also catch TypeError so non-string/non-number values such as
    None are reported as invalid instead of crashing (int(None) raises
    TypeError, which the original let escape).
    """
    try:
        int(userInput)
        return True
    except (ValueError, TypeError):
        return False
# List Lab - Series 1
def fruitSeriesOne(fruitlist):
f = copy.copy(fruitlist)
displayAllFruit(f)
# User add to end of fruit list
userInput = ""
while userInput == "":
userInput = raw_input("Name a fruit" +
" to add to the end.-->").decode()
userInput = userInput.capitalize()
| f.append(userInput)
displayAllFruit(f)
# User pick a fruit list index to display
userInput = ""
while isInt(userInput) is False or int(userInput) <= 0 or int(userInput) > len(f):
userInput = raw_input("Pick a number between 1 and "
| + str(len(f)) +
" to display the corresponding fruit.-->")
sys.stdout.write("\nFruit #" + userInput + ": "
+ f[int(userInput)-1] + "\n")
sys.stdout.write("\n")
# Add to beginning of fruit list using "+"
sys.stdout.write("Let's add 'Grapes' to the beginning of the list.-->")
f = [u"Grapes"] + f
displayAllFruit(f)
# Add to beginning of fruit list using insert
sys.stdout.write("Let's add 'Bananas' to the beginning of the list.-->")
f.insert(0, u"Bananas")
displayAllFruit(f)
# Display all fruits in the list that begin with the letter P
p_fruitlist = []
sys.stdout.write("Here are the fruits in the current list" +
" that start with the letter 'P'?\n"),
for fruit in f:
if fruit.startswith('P'):
p_fruitlist.append(fruit)
sys.stdout.write(", ".join(p_fruitlist))
sys.stdout.write("\n\n")
# List Lab - Series 2
def fruitSeriesTwo(fruitlist):
    """Work on a copy of the list: drop the last fruit, then let the user
    delete one fruit by name (re-prompting until the name matches)."""
    f = copy.copy(fruitlist)
    displayAllFruit(f)
    # Remove last fruit from the working copy
    sys.stdout.write("Let's remove the last fruit from the list...\n")
    f.pop()
    displayAllFruit(f)
    # User pick a fruit to delete
    userInput = ""
    # Loop until the capitalized input matches an item in the list.
    while userInput not in f:
        userInput = raw_input("Name a fruit" +
                              " to delete from the list.-->").decode()
        userInput = userInput.capitalize()
    f.remove(userInput)
    displayAllFruit(f)
# List Lab - Series 3
def fruitSeriesThree(fruitlist):
    """Ask about each fruit in a copy of the list; remove the ones the
    user answers 'no' to, then display the survivors."""
    f = copy.copy(fruitlist)
    # User picks fruit to delete one by one.
    # Iterate over a slice copy (f[:]) so removing from f mid-loop is safe.
    for fruit in f[:]:
        userInput = raw_input("Do you like " +
                              fruit.lower() + "?-->").decode()
        while userInput != 'no' and userInput != 'yes':
            userInput = raw_input("Do you like " +
                                  fruit.lower() +
                                  "? Please answer 'yes' or 'no'-->").decode()
        if userInput == "no":
            f.remove(fruit)
    displayAllFruit(f)
# List Lab - Series 4
def fruitSeriesFour(fruitlist):
    """Show a copy of the list with every name spelled backwards, then
    remove the last item of the original list and display it."""
    # Build the mirrored copy directly instead of append/remove juggling.
    mirrored = [name[::-1] for name in fruitlist]
    sys.stdout.write("Here is the copied list with spelling reversed:\n")
    sys.stdout.write(", ".join(mirrored))
    sys.stdout.write("\n\nLet's remove the last item of the original list.\n")
    fruitlist.pop()
    displayAllFruit(fruitlist)
sys.stdout.write("---------------------Series One----------------------\n")
fruitSeriesOne(fruitlist)
sys.stdout.write("---------------------Series Two ---------------------\n")
fruitSeriesTwo(fruitlist)
sys.stdout.write("---------------------Series Three -------------------\n")
fruitSeriesThree(fruitlist)
sys.stdout.write("---------------------Series Four --------------------\n")
fruitSeriesFour(fruitlist)
|
ttsirkia/a-plus | course/migrations/0011_auto_20151215_1133.py | Python | gpl-3.0 | 624 | 0.001603 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cou | rse', '0010_auto_20151214_1714'),
]
operations = [
migrations.AddField(
model_name='coursechapter',
name='pare | nt',
field=models.ForeignKey(to='course.CourseChapter', blank=True, null=True, related_name='children'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='coursechapter',
unique_together=set([]),
),
]
|
amcw7777/python-exercises | cs177/project4/tetris.py | Python | apache-2.0 | 3,217 | 0.010258 | import sys
import pygame
import time
from tetris_window import *
from tetrimino_factory import *
# TASK 0:
#
# Enter your group information:
#
GROUP_ID = 0
AUTHOR1 = 'Sait Celebi'
AUTHOR1_PURDUE_USERNAME = 'celebis'
AUTHOR2 = 'John Doe'
AUTHOR2_PURDUE_USERNAME = 'john123'
AUTHOR3 = 'Donald Knuth'
AUTHOR3_PURDUE_USERNAME = 'donald90'
########################### Global Constants ##############################
# size of the blocks
BLOCK_SIZE = 40 # must be even
# number of blocks in x direction
NUM_BLOCKS_X = 11
# number of blocks in y direction
NUM_BLOCKS_Y = 20
# frames per second. this is effectively speed of snake in this program.
# read this if you are not familiar:
# https://en.wikipedia.org/wiki/Frame_rate
FPS = 16
###########################################################################
# The main() function is given. Do not change this.
# However, you are encouraged to change the above parameters and
# test your program with different set of parameters.
def main():
tetris_window = TetrisWindow( BLOCK_SIZE, NUM_BLOCKS_X, NUM_BLOCKS_Y )
tetrimino_factory = TetriminoFactory( tetris_window )
current_tetrimino = tetrimino_factory.get_random_object()
counter = 0
while True:
moved_left, moved_right = False, False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
current_tetrimino.rotate()
elif event.key == pygame.K_LEFT:
current_tetrimino.move_left()
moved_left = True
time.sleep(0.15)
elif event.key == pygame.K_RIGHT:
current_tetrimino.move_right()
moved_right = True
time.sleep(0.15)
elif event.key == pygame.K_SPACE:
current_tetrimino.land()
keys = pygame.key.get_pressed()
left, right, down = keys[ pygame.K_LEFT ], keys[ pygame.K_RIGHT ], keys[ pygame.K_DOWN ]
if left and not moved_left: current_tetrimino.move_left()
if right and not moved_right: current_tetrimino.move_right()
if down: current_tetrimino | .move_down()
tetris_window.surface.fill( (0,0,0) ) # black bac | kground
counter += 1
if counter == 5:
current_tetrimino.move_down()
counter = 0
current_tetrimino.draw_tetrimino()
if current_tetrimino.has_landed():
tetris_window.add_tetrimino_to_landed_objects(current_tetrimino)
tetris_window.update_landed_objects()
current_tetrimino = tetrimino_factory.get_random_object()
tetris_window.draw_landed_objects()
tetris_window.draw_grid()
if tetris_window.is_game_over(current_tetrimino):
tetris_window.reset_window()
# update the window with the last drawings
pygame.display.update()
# set fps (speed)
tetris_window.clock.tick(FPS)
pygame.quit()
if __name__ == '__main__':
main()
|
esitamon/django-skeleton | app/website/api.py | Python | gpl-3.0 | 840 | 0.00119 | f | rom rest_framework import viewsets, permissions
import models
import serializers
class PageViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.
    Restricted to admin users (see permission_classes).
    """
    queryset = models.Page.objects.all()
    serializer_class = serializers.PageSerializer
    permission_classes = (permissions.IsAdminUser, )
    def pre_save(self, obj):
        # Stamp the requesting user as owner before the object is saved.
        obj.owner = self.request.user
class PostViewSet(viewsets.ModelViewSet):
| """
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
"""
queryset = models.Post.objects.all()
serializer_class = serializers.PostSerializer
permission_classes = (permissions.IsAdminUser, )
def pre_save(self, obj):
obj.owner = self.request.user
|
itmages/itmages-service | itmagesd/common.py | Python | gpl-2.0 | 2,588 | 0.004637 | #
# -*- coding: utf-8 -*-
#
# Copyright 2011 Voldemar Khramtsov <harestomper@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#-------------------------------------------------------------------------------
import os
import glib
import tempfile
class stat:
    # Process-wide shared state for the daemon. Within this module only
    # `verbose` (read by echo() below) and `temporary` (seeded with
    # TEMPFOLDER further down) are used; the remaining flags are consumed
    # elsewhere in the daemon.
    cancel_all = False
    quiting = False
    temporary = []
    pause = False
    verbose = True
    isdaemon = False
DBUS_INTERFACE = 'org.freedesktop.ITmagesEngine'
DBUS_PATH = '/org/freedesktop/ITmagesEngine'
# Image types the service handles.
MIMETYPES = ('image/jpeg', 'image/png', 'image/gif')
# Canned XML response for a cancelled action.
# NOTE(review): the literal word 'separator' inside this string looks like a
# leftover placeholder -- confirm it is intended before relying on it.
RESP_CANCELLED = '''<response><status>cancelled</status>separator<reason>Action cancelled</reason></response>'''
USERCONFIGDIR = os.path.join(glib.get_user_config_dir(), "itmagesd")
TEMPFOLDER = os.path.join(glib.get_user_cache_dir(), 'itmages', 'pictures')
# Route all tempfile allocations into our cache dir and record it in the
# shared temporary-paths list.
tempfile.tempdir = TEMPFOLDER
stat.temporary.append(TEMPFOLDER)
SOCKNAME = tempfile.mktemp('.socket')
DAEMON_CONF = os.path.join(USERCONFIGDIR, "service.xml")
# Make sure the cache and config directories exist before use.
if not os.path.exists(TEMPFOLDER):
    os.makedirs(TEMPFOLDER)
if not os.path.exists(USERCONFIGDIR):
    os.makedirs(USERCONFIGDIR)
class Messages:
UNSUPPORTED = "Is unsupported argument type."
CAN_NOT_BE_PUT = "This file can not be put to queue."
ITEM_EXISTS = "This file is already exists in queue."
IMPOSSIBLE_METHOD = "Impossible for apply this specified method"
ARGUMENTS_REQUIRES = "This action requires arguments 'user', 'passwd', 'key' and 'id'."
USERDATA_REQUIRED = "Not found 'user' or 'passwd' keys. For this action i | ts keys is required."
class ActionType:
    # Event tags for action callbacks (the IOMOD_* prefix presumably refers
    # to the I/O module -- confirm against the consumers).
    IOMOD_RESPONSE = "iomodresponse"
    IOMOD_PROGRESS = "iomodprogress"
def echo(message, output="o"):
    """Write *message* plus a newline to stdout (default) or stderr
    (output="e"), but only when stat.verbose is set."""
    # Fixed: `sys` was referenced here but never imported anywhere in this
    # module (only os, glib and tempfile are imported), so every verbose
    # echo() call raised NameError.
    import sys
    if stat.verbose:
        if output == "e":
            std = sys.stderr
        else:
            std = sys.stdout
        std.write("%s\n" % message)
###
|
newmediamedicine/indivo_server_1_0 | indivo/document_processing/idp_objs/equipmentscheduleitem.py | Python | gpl-3.0 | 3,298 | 0.014554 | from indivo.lib import iso8601
from indivo.models import EquipmentScheduleItem
XML = 'xml'
DOM = 'dom'
class IDP_Equipm | entScheduleItem:
def post_data(self, name=None,
name_type=None,
| name_value=None,
name_abbrev=None,
scheduledBy=None,
dateScheduled=None,
dateStart=None,
dateEnd=None,
recurrenceRule_frequency=None,
recurrenceRule_frequency_type=None,
recurrenceRule_frequency_value=None,
recurrenceRule_frequency_abbrev=None,
recurrenceRule_interval=None,
recurrenceRule_interval_type=None,
recurrenceRule_interval_value=None,
recurrenceRule_interval_abbrev=None,
recurrenceRule_dateUntil=None,
recurrenceRule_count=None,
instructions=None):
"""
SZ: More error checking needs to be performed in this method
"""
try:
if dateScheduled:
"""
Elliot: 3/4 changed parse_utc_date to parse_date to handle XML:datetime
"""
dateScheduled = iso8601.parse_date(dateScheduled)
if dateStart:
"""
Elliot: 3/4 changed parse_utc_date to parse_date to handle XML:datetime
"""
dateStart = iso8601.parse_date(dateStart)
if dateEnd:
"""
Elliot: 3/4 changed parse_utc_date to parse_date to handle XML:datetime
"""
dateEnd = iso8601.parse_date(dateEnd)
if recurrenceRule_dateUntil:
"""
Elliot: 3/4 changed parse_utc_date to parse_date to handle XML:datetime
"""
recurrenceRule_dateUntil = iso8601.parse_date(recurrenceRule_dateUntil)
equipmentscheduleitem_obj = EquipmentScheduleItem.objects.create(
name=name,
name_type=name_type,
name_value=name_value,
name_abbrev=name_abbrev,
scheduled_by=scheduledBy,
date_scheduled=dateScheduled,
date_start=dateStart,
date_end=dateEnd,
recurrencerule_frequency=recurrenceRule_frequency,
recurrencerule_frequency_type=recurrenceRule_frequency_type,
recurrencerule_frequency_value=recurrenceRule_frequency_value,
recurrencerule_frequency_abbrev=recurrenceRule_frequency_abbrev,
recurrencerule_interval=recurrenceRule_interval,
recurrencerule_interval_type=recurrenceRule_interval_type,
recurrencerule_interval_value=recurrenceRule_interval_value,
recurrencerule_interval_abbrev=recurrenceRule_interval_abbrev,
recurrencerule_dateuntil=recurrenceRule_dateUntil,
recurrencerule_count=recurrenceRule_count,
instructions=instructions)
return equipmentscheduleitem_obj
except Exception, e:
print "Error: " + str(e)
raise ValueError("problem processing equipmentscheduleitem report " + str(e))
|
pyroscope/pyrocore | src/pyrocore/torrent/queue.py | Python | gpl-2.0 | 6,987 | 0.00458 | # -*- coding: utf-8 -*-
# pylint: disable=I0011
""" rTorrent Queue Manager.
Copyright (c) 2012 The PyroScope Project <pyroscope.project@gmail.com>
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import with_statement
from __future__ import absolute_import
import time
from pyrocore import error
from pyrocore import config as config_ini
from pyrocore.util import fmt, xmlrpc, pymagic
from pyrocore.torrent import engine, matching, formatting
class QueueManager(object):
""" rTorrent queue manager implementation.
"""
# Special view containing all items that are transferring data, have peers connected, or are incomplete
VIEWNAME = "pyrotorque"
    def __init__(self, config=None):
        """ Set up queue manager.

        Compiles the `startable` and `downloading` matchers from the job
        configuration and validates the optional sort fields.
        """
        self.config = config or {}
        self.proxy = None
        self.last_start = 0
        self.LOG = pymagic.get_class_logger(self)
        if 'log_level' in self.config:
            # `config` is necessarily non-None here, since self.config
            # would otherwise be the empty dict.
            self.LOG.setLevel(config.log_level)
        self.LOG.debug("Queue manager created with config %r" % self.config)
        # Helper to coerce job config entries into booleans with context info.
        bool_param = lambda key, default: matching.truth(self.config.get(key, default), "job.%s.%s" % (self.config.job_name, key))
        self.config.quiet = bool_param("quiet", False)
        # Combine the global startable base condition with the job's own.
        self.config.startable = matching.ConditionParser(engine.FieldDefinition.lookup, "name").parse(
            "[ %s ] [ %s ]" % (config_ini.torque['queue_startable_base'], self.config.startable)
        )
        self.LOG.info("Startable matcher for '%s' is: [ %s ]" % (self.config.job_name, self.config.startable))
        # "Downloading" always means active & incomplete, optionally narrowed
        # by an extra job-level condition.
        self.config.downloading = matching.ConditionParser(engine.FieldDefinition.lookup, "name").parse(
            "is_active=1 is_complete=0" + (" [ %s ]" % self.config.downloading if "downloading" in self.config else "")
        )
        self.LOG.info("Downloading matcher for '%s' is: [ %s ]" % (self.config.job_name, self.config.downloading))
        self.sort_key = formatting.validate_sort_fields(self.config.sort_fields) if self.config.sort_fields.strip() else None
def _start(self, items):
""" Start some items if conditions are met.
"""
# TODO: Filter by a custom date field, for scheduled downloads starting at a certain time, or after a given delay
# TODO: Don't start anything more if download BW is used >= config threshold in %
# Check if anything more is ready to start downloading
startable = [i for i in items if self.config.startable.match(i)]
if not startable:
self.LOG.debug("Checked %d item(s), none startable according to [ %s ]",
len(items), self.config.startable)
return
# Check intermission delay
now = time.time()
if now < self.last_start:
# compensate for summer time and other oddities
self.last_start = now
delayed = int(self.last_start + self.config.intermission - now)
if delayed > 0:
self.LOG.debug("Delaying start of {:d} item(s),"
" due to {:d}s intermission with {:d}s left"
.format(len(startable), self.config.intermission, delayed))
return
# TODO: sort by priority, then loaded time
# Stick to "start_at_once" parameter, unless "downloading_min" is violated
downloading = [i for i in items if self.config.downloading.match(i)]
start_now = max(self.config.start_at_once, self.config.downloading_min - len(downloading))
start_now = min(start_now, len(startable))
#down_traffic = sum(i.down for i in downloading)
##self.LOG.info("%d downloading, down %d" % (len(downloading), down_traffic))
# Start eligible items
for idx, item in enumerate(startable):
# Check if we reached 'start_now' in this run
if idx >= start_now:
self.LOG.debug("Only starting %d item(s) in this run, %d more could be downloading" % (
start_now, len(startable)-idx,))
break
# TODO: Prevent start of more torrents that can fit on the drive (taking "off" files into account)
# (restarts items that were stopped due to the "low_diskspace" schedule, and also avoids triggering it at all)
# Only check the other conditions when we have `downloading_min` covered
if len(downloading) < self.config.downloading_min:
self.LOG.debug("Catching up from %d to a minimum of %d downloading item(s)" % (
len(downloading), self.config.downloading_min))
else:
# Limit to the given maximum of downloading items
if len(downloading) >= self.config.downloading_max:
self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % (
len(downloading), self.config.downloading_max, len(startable)-idx,))
break
# If we made it here, start it!
self.last_start = now
downloading.append(item)
self.LOG.info("%s '%s' [%s, #%s]" % (
"WOULD start" if self.config.dry_run else "Starting",
fmt.to_utf8(item.name), item.alias, item.hash))
if not self.config.dry_run:
item.start()
if not self.config.qui | et:
self.pr | oxy.log(xmlrpc.NOHASH, "%s: Started '%s' {%s}" % (
self.__class__.__name__, fmt.to_utf8(item.name), item.alias,
))
    def run(self):
        """ Queue manager job callback.

        Fetches the current 'pyrotorque' view, optionally sorts it, and
        delegates to _start() to launch eligible items.
        """
        try:
            self.proxy = config_ini.engine.open()
            # Get items from 'pyrotorque' view
            items = list(config_ini.engine.items(self.VIEWNAME, cache=False))
            if self.sort_key:
                items.sort(key=self.sort_key)
                #self.LOG.debug("Sorted: %r" % [i.name for i in items])
            # Handle found items
            self._start(items)
            self.LOG.debug("%s - %s" % (config_ini.engine.engine_id, self.proxy))
        except (error.LoggableError, xmlrpc.ERRORS) as exc:
            # only debug, let the statistics logger do its job
            self.LOG.debug(str(exc))
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractNightskytlWordpressCom.py | Python | bsd-3-clause | 560 | 0.033929 |
def extractNightskytlWordpressCom(item):
'''
Parser for 'nightskytl.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
retu | rn buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=t | l_type)
return False
|
UKN-DBVIS/SciBib | app/backend/db_controller/query/authors_publications.py | Python | apache-2.0 | 1,871 | 0.003207 | # Copyright (C) 2020 University of Konstanz - Data Analysis and Visualization Group
# This file is part of SciBib <https://github.com/dbvis-ukon/SciBib>.
#
# SciBib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, eit | her version 3 of the License, or
# (at your option) any later version.
#
# SciBib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Genera | l Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SciBib. If not, see <http://www.gnu.org/licenses/>.
from backend.db_controller.db import SQLAlchemy
from backend.db_controller.db import Authors_publications
from backend.db_controller.helper import isInt
db = SQLAlchemy()
def addAuthorsPublications(session, **columns):
    """
    Add authors <-> publications mapping to the database
    @param session: An open database session
    @type session: SQLAlchemy Session
    @param columns: a dict containing an 'author_id', 'publication_id' and the 'position' of the author in the paper
    @type columns: dict
    @return: the newly created AuthorPublications object
    @rtype: AuthorsPublication object
    """
    # Fixed: the original condition nested `or not isInt(publication_id)`
    # inside the first isInt() call's argument list, so publication_id and
    # position were never actually validated. Also give 'position' the same
    # '' fallback as the other keys so a missing key fails validation
    # instead of passing None into isInt().
    if (not isInt(columns.get('author_id', ''))
            or not isInt(columns.get('publication_id', ''))
            or not isInt(columns.get('position', ''))):
        raise ValueError('Author_id, publication_id and position must be numbers')
    newAuthorsPublications = Authors_publications(
        author_id=columns['author_id'],
        publication_id=columns['publication_id'],
        position=columns['position']
    )
    session.add(newAuthorsPublications)
    return newAuthorsPublications
wbsoft/frescobaldi | frescobaldi_app/snippet/import_export.py | Python | gpl-2.0 | 7,763 | 0.000902 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Import and export of snippets.
"""
import os
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QMessageBox, QTreeWidget, QTreeWidgetItem
import app
import appinfo
import qutil
import userguide
import widgets.dialog
from . import model
from . import snippets
from . import builtin
def save(names, filename):
    """Write the named snippets to an XML file at *filename*."""
    root = ET.Element('snippets')
    root.text = '\n\n'
    root.tail = '\n'
    tree = ET.ElementTree(root)

    # Leading XML comment identifying the application that wrote the file.
    header = ET.Comment(_comment.format(appinfo=appinfo))
    header.tail = '\n\n'
    root.append(header)

    for name in names:
        snip = ET.Element('snippet')
        snip.set('id', name)
        snip.text = '\n'
        snip.tail = '\n\n'

        # Title element (untranslated title text).
        title = ET.Element('title')
        title.text = snippets.title(name, False)
        title.tail = '\n'

        # Shortcuts element, one child per key sequence (may be empty).
        shortcuts = ET.Element('shortcuts')
        sequences = model.shortcuts(name)
        if sequences:
            shortcuts.text = '\n'
            for seq in sequences:
                shortcut = ET.Element('shortcut')
                shortcut.text = seq.toString()
                shortcut.tail = '\n'
                shortcuts.append(shortcut)
        shortcuts.tail = '\n'

        # Body element holds the snippet text itself.
        body = ET.Element('body')
        body.text = snippets.text(name)
        body.tail = '\n'

        for child in (title, shortcuts, body):
            snip.append(child)
        root.append(snip)
    tree.write(filename, "UTF-8")
def load(filename, widget):
    """Loads snippets from a file, displaying them in a list.

    The user can then choose:
    - overwrite builtin snippets or not
    - overwrite own snippets with same title or not
    - select and view snippets contents.
    """
    # Parse the XML file; any failure (parse error or no snippets at all)
    # is reported to the user and aborts the import.
    try:
        d = ET.parse(filename)
        elements = list(d.findall('snippet'))
        if not elements:
            raise ValueError(_("No snippets found."))
    except Exception as e:
        QMessageBox.critical(widget, app.caption(_("Error")),
            _("Can't read from source:\n\n{url}\n\n{error}").format(
                url=filename, error=e))
        return

    dlg = widgets.dialog.Dialog(widget)
    dlg.setWindowModality(Qt.WindowModal)
    dlg.setWindowTitle(app.caption(_("dialog title", "Import Snippets")))
    tree = QTreeWidget(headerHidden=True, rootIsDecorated=False)
    dlg.setMainWidget(tree)
    userguide.addButton(dlg.buttonBox(), "snippet_import_export")

    allnames = frozenset(snippets.names())
    builtins = frozenset(builtin.builtin_snippets)
    # Map title -> name for the user's own (non-builtin) snippets, used to
    # match imported snippets that carry a random id.
    titles = dict((snippets.title(n), n) for n in allnames if n not in builtins)

    # Three group items the imported snippets are sorted into.
    new = QTreeWidgetItem(tree, [_("New Snippets")])
    updated = QTreeWidgetItem(tree, [_("Updated Snippets")])
    unchanged = QTreeWidgetItem(tree, [_("Unchanged Snippets")])

    new.setFlags(Qt.ItemIsEnabled)
    updated.setFlags(Qt.ItemIsEnabled)
    unchanged.setFlags(Qt.ItemIsEnabled)

    new.setExpanded(True)
    updated.setExpanded(True)

    items = []
    for snip in elements:
        item = QTreeWidgetItem()

        item.body = snip.find('body').text
        item.title = snip.find('title').text
        item.shortcuts = list(e.text for e in snip.findall('shortcuts/shortcut'))

        title = item.title or snippets.maketitle(snippets.parse(item.body).text)
        item.setText(0, title)

        # Only honor the id attribute when it names a builtin snippet;
        # otherwise fall back to matching by title.
        name = snip.get('id')
        name = name if name in builtins else None

        # determine if new, updated or unchanged
        if not name:
            name = titles.get(title)
        item.name = name

        if not name or name not in allnames:
            new.addChild(item)
            items.append(item)
            item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
            item.setCheckState(0, Qt.Checked)
        elif name:
            # A known snippet counts as updated when its body, title or
            # shortcuts differ from the stored version.
            if (item.body != snippets.text(name)
                or title != snippets.title(name)
                or (item.shortcuts and item.shortcuts !=
                    [s.toString() for s in model.shortcuts(name) or ()])):
                updated.addChild(item)
                items.append(item)
                item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
                item.setCheckState(0, Qt.Checked)
            else:
                unchanged.addChild(item)
                item.setFlags(Qt.ItemIsEnabled)
    # count:
    for i in new, updated, unchanged:
        i.setText(0, i.text(0) + " ({0})".format(i.childCount()))
    for i in new, updated:
        if i.childCount():
            i.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
            i.setCheckState(0, Qt.Checked)

    # Checking/unchecking a group item toggles all its children.
    def changed(item):
        if item in (new, updated):
            for i in range(item.childCount()):
                c = item.child(i)
                c.setCheckState(0, item.checkState(0))
    tree.itemChanged.connect(changed)

    importShortcuts = QTreeWidgetItem([_("Import Keyboard Shortcuts")])
    if items:
        tree.addTopLevelItem(importShortcuts)
        importShortcuts.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
        # NOTE: fixed corrupted call `setCheckState(0 | , Qt.Checked)`.
        importShortcuts.setCheckState(0, Qt.Checked)
        dlg.setMessage(_("Choose which snippets you want to import:"))
    else:
        dlg.setMessage(_("There are no new or updated snippets in the file."))
        unchanged.setExpanded(True)

    # Restored the {filename} placeholder consumed by .format() below.
    tree.setWhatsThis(_(
        "<p>Here the snippets from {filename} are displayed.</p>\n"
        "<p>If there are new or updated snippets, you can select or deselect "
        "them one by one, or all at once, using the checkbox of the group. "
        "Then click OK to import all the selected snippets.</p>\n"
        "<p>Existing, unchanged snippets can't be imported.</p>\n"
        ).format(filename=os.path.basename(filename)))

    qutil.saveDialogSize(dlg, "snippettool/import/size", QSize(400, 300))
    if not dlg.exec_() or not items:
        return
    ac = model.collection()
    m = model.model()
    with qutil.busyCursor():
        for i in items:
            if i.checkState(0) == Qt.Checked:
                index = m.saveSnippet(i.name, i.body, i.title)
                if i.shortcuts and importShortcuts.checkState(0):
                    shortcuts = list(map(QKeySequence.fromString, i.shortcuts))
                    ac.setShortcuts(m.name(index), shortcuts)
    widget.updateColumnSizes()
# Template for the XML comment written at the top of exported snippet
# files; save() fills it in with the appinfo module via str.format().
_comment = """
Created by {appinfo.appname} {appinfo.version}.
Every snippet is represented by:
title: title text
shortcuts: list of shortcut elements, every shortcut is a key sequence
body: the snippet text
The snippet id attribute can be the name of a builtin snippet or a random
name like 'n123456'. In the latter case, the title is used to determine
whether a snippet is new or updated.
"""
|
chrisnatali/networkx | networkx/classes/graph.py | Python | bsd-3-clause | 55,144 | 0.000254 | """Base class for undirected graphs.
The Graph class allows any hashable object as a node
and can associate key/value attribute pairs with each undirected edge.
Self-loops are allowed but multiple edges are not (see MultiGraph).
For directed graphs see DiGraph and MultiDiGraph.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx as nx
from networkx.exception import NetworkXError
import networkx.convert as convert
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
class Graph(object):
"""
Base class for undirected graphs.
A Graph stores nodes and edges with optional data, or attributes.
Graphs hold undirected edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
DiGraph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.Graph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.Graph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
The fastest way to traverse all edges of a graph is via
adjacency(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 1, 4)
(2, 3, 8)
(3, 2, 8)
>>> list(G.edges(data='weight'))
[(1, 2, 4), (2, 3, 8), (3, 4, None), (4, 5, None)]
**Reporting:**
Simple graph information is obtained using methods.
Reporting methods usually return iterators instead of containers
to reduce memory usage.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency lists keyed by node.
The next di | ct (adjlist) represents the adjacency list and holds
edge data keyed by neighbor. The inner dict (edge_attr) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
h | olding the factory for that dict-like structure. The variable names
are node_dict_factory, adjlist_dict_factory and edge_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency lists keyed by node.
It should require no arguments and return a dict-like object.
adjlist_dict_factory : function, (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, (default: dict)
Factory function to be used to create the edge attribute
dict which holds attrbute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Examples
--------
Create a graph object that tracks the order nodes are added.
>>> from collections import OrderedDict
>>> class OrderedNodeGraph(nx.Graph):
... node_dict_factory=OrderedDict
>>> G=OrderedNodeGraph()
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (1,1)) )
>>> list(G.edges())
[(2, 1), (2, 2), (1, 1)]
Create a graph object that tracks the order nodes are added
and for each node track the order that neighbors are added.
>>> class OrderedGraph(nx.Graph):
... node_dict_factory = OrderedDict
... adjlist_dict_factory = OrderedDict
>>> G = OrderedGraph()
>>> G.add_nodes_from( (2,1) )
>>> list(G.nodes())
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (1,1)) )
>>> list(G.edges())
[(2, 2), (2, 1), (1, 1)]
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {'weight': 1}
... def single_edge_dict(self):
... return self.all_edge_dict
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2,1)
>>> list(G.edges(data= True))
[(1, 2, {'weight': 1})]
>>> G.add_edge(2,2)
>>> G[2][1] is G[2][2]
True
"""
no |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.