repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
guibargar/stt-api | train_traffic_app/views.py | Python | mit | 1,323 | 0.005291 |
from rest_framework import generics
from rest_framework import permissions
from django.contrib.auth.models import User
from train_traffic_app.models import TrainTrafficRequest
from train_traffic_app.serializers import TrainTrafficRequestSerializer
from train_traffic_app.serializers import UserSerializer
from train_traffic_app.permissions import IsOwnerOrReadOnly
class TrainTrafficRequestList(generics.ListCreateAPIView):
    """
    List all train traffic requests (GET), or create a new one (POST).

    Anonymous clients get read-only access; authenticated users may also
    create new requests, which are stamped with their user as owner.
    """
    queryset = TrainTrafficRequest.objects.all()
    serializer_class = TrainTrafficRequestSerializer
    # Read access for everyone; write access only when authenticated.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    def perform_create(self, serializer):
        # Record the requesting user as the owner of the newly created object.
        serializer.save(owner=self.request.user)
class TrainTrafficRequestDetail(gene | rics.RetrieveUpdateDestroyAPIView):
"""
Retrieve, update or delete a train_traffic_request.
"""
queryset = TrainTrafficRequest.objects.all()
serializer_class = TrainTrafficRequestSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,IsOwnerOrReadOnly,)
class UserList(generics.ListAPIView):
    """Read-only endpoint that lists all registered users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
class Us | erDetail(generics.RetrieveAPIView):
queryset = User.objects.all()
serializer_class = UserSerializer |
seejay/feedIO | feedio/rilPlugin.py | Python | gpl-3.0 | 1,738 | 0.004028 | #!/usr/bin/python
"""
Read It Later (RIL) module for feedIO.
"""
__ve | rsion__ = "0.0.5"
__license__ = """
Copyright (C) 2011 Sri Lanka Institute of Information Technology.
feedIO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
feedIO is distributed in the hope that it will be useful,
but WIT | HOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with feedIO. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Chanaka Jayamal <seejay@seejay.net>"
__developers__ = ["Chanaka Jayamal",
"Lanka Amarasekara",
"Kolitha Gajanayake",
"Chamika Viraj"]
API_KEY = 'f9fT6t40g092alT6a7d908ch34p1Ux04'
SESSION = None
import lib.readitlater.api as ril
import sys
class FeedIOError(Exception): pass  # Base class for all feedIO errors.
class LogInError(FeedIOError): pass  # Raised when Read It Later authentication fails.
class RilSession:
"""
RilSession class to sync items from the Database with Read It Later
servers.
"""
def __init__ (self, user, pw):
self.session = ril.API(API_KEY, user, pw)
try:
self.session.auth()
except:
print "login Error!"
raise LogInError
def submitItem(self, item):
self.session.add(item.url,item.title)
def getItems(state="unread"):
self.session.get(state)
def setItemTag(self, item, tags):
pass
|
catapult-project/catapult-csm | third_party/google-endpoints/apitools/base/py/testing/mock.py | Python | bsd-3-clause | 12,590 | 0 | #
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The mock module allows easy mocking of apitools clients.
This module allows you to mock out the constructor of a particular apitools
client, for a specific API and version. Then, when the client is created, it
will be run against an expected session that you define. This way code that is
not aware of the testing framework can construct new clients as normal, as long
as it's all done within the context of a mock.
"""
import difflib
import six
from apitools.base.protorpclite import messages
from apitools.base.py import base_api
from apitools.base.py import encoding
from apitools.base.py import exceptions
class Error(Exception):
    """Base class for exceptions raised by this mock module."""
def _MessagesEqual(msg1, msg2):
    """Compare two protorpc messages (or lists/scalars) for isomorphism.

    Python's == operator does not work reliably for messages when a list
    is involved, so lists are compared element-wise and messages
    field-by-field, recursing as needed.

    Args:
      msg1: protorpc.messages.Message or [protorpc.messages.Message] or
          number or string, One of the values to compare.
      msg2: protorpc.messages.Message or [protorpc.messages.Message] or
          number or string, One of the values to compare.

    Returns:
      True if the two values are isomorphic, False otherwise.
    """
    if isinstance(msg1, list) and isinstance(msg2, list):
        # Lists match when they have equal length and pairwise-equal items.
        return (len(msg1) == len(msg2) and
                all(_MessagesEqual(a, b) for a, b in zip(msg1, msg2)))
    both_messages = (isinstance(msg1, messages.Message) and
                     isinstance(msg2, messages.Message))
    if not both_messages:
        # Scalars (or mixed types) fall back to ordinary equality.
        return msg1 == msg2
    # Two messages are equal when every declared field compares equal.
    return all(
        _MessagesEqual(getattr(msg1, field.name), getattr(msg2, field.name))
        for field in msg1.all_fields())
class UnexpectedRequestException(Error):
def __init__(self, received_call, expected_call):
expected_key, expected_request = expected_call
received_key, received_request = received_call
expected_repr = encoding.MessageToRepr(
expected_request, multiline=True)
received_repr = encoding.MessageToRepr(
received_request, multiline=True)
expected_lines = expected_repr.splitlines()
received_lines = received_repr.splitlines()
diff_lines = difflib.unified_diff(expected_lines, received_lines)
diff = '\n'.join(diff_lines)
if expected_key != received_key:
msg = '\n'.join((
'expected: {expected_key}({expected_request})',
'received: {received_key}({received_request})',
'',
)).format(
expected_key=expected_key,
expected_request=expected_repr,
received_key=received_key,
received_request=received_repr)
super(UnexpectedRequestException, self).__init__(msg)
else:
msg = '\n'.join((
'for request to {key},',
'expected: {expected_request}',
'received: {received_request}',
'diff: {diff}',
'', |
)).format(
key=expected_key,
expected_request=expected_ | repr,
received_request=received_repr,
diff=diff)
super(UnexpectedRequestException, self).__init__(msg)
class ExpectedRequestsException(Error):
    """Raised when a mock session ends with expected requests unconsumed."""

    def __init__(self, expected_calls):
        # Build one line per outstanding (key, request) expectation.
        parts = ['expected:\n']
        for (key, request) in expected_calls:
            parts.append('{key}({request})\n'.format(
                key=key,
                request=encoding.MessageToRepr(request, multiline=True)))
        super(ExpectedRequestsException, self).__init__(''.join(parts))
class _ExpectedRequestResponse(object):

    """Encapsulation of an expected request and corresponding response."""

    def __init__(self, key, request, response=None, exception=None):
        """Record one expected call and its canned outcome.

        Args:
          key: str, Name of the service method this expectation is for.
          request: The request expected to be received.
          response: The canned response to return, or None.
          exception: An exceptions.Error instance to raise, or None.

        Raises:
          exceptions.ConfigurationValueError: If both a response and an
              exception are supplied, if the response is itself an Error,
              or if the exception is not an instance of Error.
        """
        self.__key = key
        self.__request = request

        # Use explicit None checks: a falsy-but-present response (e.g. an
        # empty message) previously slipped past these truthiness guards.
        if response is not None and exception is not None:
            raise exceptions.ConfigurationValueError(
                'Should specify at most one of response and exception')
        if isinstance(response, exceptions.Error):
            raise exceptions.ConfigurationValueError(
                'Responses should not be an instance of Error')
        if exception is not None and not isinstance(exception,
                                                    exceptions.Error):
            raise exceptions.ConfigurationValueError(
                'Exceptions must be instances of Error')

        self.__response = response
        self.__exception = exception

    @property
    def key(self):
        return self.__key

    @property
    def request(self):
        return self.__request

    def ValidateAndRespond(self, key, request):
        """Validate that key and request match expectations, and respond if so.

        Args:
          key: str, Actual key to compare against expectations.
          request: protorpc.messages.Message or [protorpc.messages.Message]
            or number or string, Actual request to compare againt expectations

        Raises:
          UnexpectedRequestException: If key or request dont match
              expectations.
          apitools_base.Error: If a non-None exception is specified to
              be thrown.

        Returns:
          The response that was specified to be returned.
        """
        if key != self.__key or not _MessagesEqual(request, self.__request):
            raise UnexpectedRequestException((key, request),
                                             (self.__key, self.__request))

        if self.__exception:
            # Can only throw apitools_base.Error.
            raise self.__exception  # pylint: disable=raising-bad-type

        return self.__response
class _MockedService(base_api.BaseApiService):

    """Service wrapper that replaces each API method with a _MockedMethod."""

    def __init__(self, key, mocked_client, methods, real_service):
        super(_MockedService, self).__init__(mocked_client)
        # Adopt the real service's attributes (config, upload info, ...).
        # NOTE(review): real_service is dereferenced unconditionally here,
        # so the `if real_service` guard below can never be falsy in
        # practice -- confirm whether a None real_service is supported.
        self.__dict__.update(real_service.__dict__)
        for method in methods:
            real_method = None
            if real_service:
                real_method = getattr(real_service, method)
            # Shadow the real method with a recording/validating mock that
            # keeps a handle to the original implementation.
            setattr(self, method,
                    _MockedMethod(key + '.' + method,
                                  mocked_client,
                                  real_method))
class _MockedMethod(object):
"""A mocked API service method."""
def __init__(self, key, mocked_client, real_method):
self.__key = key
self.__mocked_client = mocked_client
self.__real_method = real_method
def Expect(self, request, response=None, exception=None, **unused_kwargs):
"""Add an expectation on the mocked method.
Exactly one of response and exception should be specified.
Args:
request: The request that should be expected
response: The response that should be returned or None if
exception is provided.
exception: An exception that should be thrown, or None.
"""
# TODO(jasmuth): the unused_kwargs provides a placeholder for
# future things that can be passed to Expect(), like special
# params to the method call.
# pylint: disable=protected-access
# Class in same module.
self.__mocked_client._request_responses.append(
_ExpectedRequestResponse(self.__key,
request,
response=response,
exception=exception))
# pylint: enable=protected-access
def __call__(self, request, **unused_kwargs):
# TODO(ja |
knuu/competitive-programming | atcoder/corp/codethanksfes2017_f.py | Python | mit | 421 | 0 | N, | K = map(int, input().split())
A = [int(input()) for _ in range(N)]
A.sort()  # NOTE(review): order does not affect the XOR transitions below; presumably harmless.
# dp[row][j] = number of subsets of the elements processed so far whose XOR
# equals j, modulo 1e9+7. Two rows are alternated (rolling array) so memory
# stays at 2 * 2e5 counters.
dp = [[0] * int(2e5) for _ in range(2)]
dp[0][0] = 1  # the empty subset has XOR 0
limit = 0  # bitwise OR of processed elements: any reachable XOR has only these bits
MOD = 10 ** 9 + 7
for i in range(N):
    prev, now = i % 2, (i + 1) % 2
    # Carry over every count where A[i] is excluded from the subset.
    for j in range(limit + 1):
        dp[now][j] = dp[prev][j]
    # Add the counts where A[i] is included (XOR transition j -> j ^ A[i]).
    for j in range(limit + 1):
        dp[now][j ^ A[i]] += dp[prev][j]
        dp[now][j ^ A[i]] %= MOD
    limit |= A[i]
print(dp[N % | 2][K])
|
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/GenFds/Fv.py | Python | gpl-2.0 | 16,415 | 0.011636 | ## @file
# process FV generation
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import subprocess
import StringIO
from struct import *
import Ffs
import AprioriSection
from GenFdsGlobalVariable import GenFdsGlobalVariable
from GenFds import GenFds
from CommonDataClass.FdfClass import FvClassObject
from Common.Misc import SaveFileOnChange
from Common.LongFilePathSupport import CopyLongFilePath
from Common.LongFilePathSupport import OpenLongFilePath as open
T_CHAR_LF = '\n'
## generate FV
#
#
class FV (FvClassObject):
## The constructor
#
# @param self The object pointer
#
    def __init__(self):
        FvClassObject.__init__(self)
        # Handles for the generated .inf / FV-address files (opened later).
        self.FvInfFile = None
        self.FvAddressFile = None
        # Flash layout attributes, populated from the FDF description.
        self.BaseAddress = None
        self.InfFileName = None
        self.FvAddressFileName = None
        # Name of the capsule containing this FV, if any (used to reject
        # capsule FVs that also live in an FD flash region).
        self.CapsuleName = None
        self.FvBaseAddress = None
        self.FvForceRebase = None
## AddToBuffer()
#
# Generate Fv and add it to the Buffer
#
# @param self The object pointer
# @param Buffer The buffer generated FV data will be put
# @param BaseAddress base address of FV
# @param BlockSize block size of FV
# @param BlockNum How many blocks in FV
# @param ErasePolarity Flash erase polarity
# @param VtfDict VTF objects
# @param MacroDict macro value pair
# @retval string Generated FV file path
#
def AddToBuffer (self, Buffer, BaseAddress=None, BlockSize= None, BlockNum=None, ErasePloarity='1', VtfDict=None, MacroDict = {}) :
if BaseAddress == None and self.UiFvName.upper() + 'fv' in GenFds.ImageBinDict.keys():
return GenFds.ImageBinDict[self.UiFvName.upper() + 'fv']
#
# Check whether FV in Capsule is in FD flash region.
# If yes, return error. Doesn't support FV in Capsule image is also in FD flash region.
#
if self.CapsuleName != None:
for FdName in GenFdsGlobalVariable.FdfParser.Profile.FdDict.keys():
FdObj = GenFdsGlobalVariable.FdfParser.Profile.FdDict[FdName]
for RegionObj in FdObj.RegionList:
if RegionObj.RegionType == 'FV':
for RegionData in RegionObj.RegionDataList:
if RegionData.endswith(".fv"):
continue
elif RegionData.upper() + 'fv' in GenFds.ImageBinDict.keys():
continue
elif self.UiFvName.upper() == RegionData.upper():
GenFdsGlobalVariable.ErrorLogger("Capsule %s in FD region can't contain a FV %s in FD region." % (self.CapsuleName, self.UiFvName.upper()))
GenFdsGlobalVariable.InfLogger( "\nGenerating %s FV" %self.UiFvName)
GenFdsGlobalVariable.LargeFileInFvFlags.append(False)
FFSGuid = None
if self.FvBaseAddress != None:
BaseAddress = self.FvBaseAddress
self.__InitializeInf__(BaseAddress, BlockSize, BlockNum, ErasePloarity, VtfDict)
#
# First Process the Apriori section
#
MacroDict.update(self.DefineVarDict)
GenFdsGlobalVariable.VerboseLogger('First generate Apriori file !')
FfsFileList = []
for AprSection in self.AprioriSectionList:
FileName = AprSection.GenFfs (self.UiFvName, MacroDict)
FfsFileList.append(FileName)
# Add Apriori file name to Inf file
self.FvInfFile.writelines("EFI_FILE_NAME = " + \
FileName + \
T_CHAR_LF)
# Process Modules in FfsList
for FfsFile in self.F | fsList :
FileName = FfsFile.GenFfs(MacroDict, FvParentAddr=BaseAddress)
FfsFileList.append(FileName)
self.FvInfFile.writelines("EFI_FILE_NAME = " + \
FileName + \
| T_CHAR_LF)
SaveFileOnChange(self.InfFileName, self.FvInfFile.getvalue(), False)
self.FvInfFile.close()
#
# Call GenFv tool
#
FvOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiFvName)
FvOutputFile = FvOutputFile + '.Fv'
# BUGBUG: FvOutputFile could be specified from FDF file (FV section, CreateFile statement)
if self.CreateFileName != None:
FvOutputFile = self.CreateFileName
FvInfoFileName = os.path.join(GenFdsGlobalVariable.FfsDir, self.UiFvName + '.inf')
CopyLongFilePath(GenFdsGlobalVariable.FvAddressFileName, FvInfoFileName)
OrigFvInfo = None
if os.path.exists (FvInfoFileName):
OrigFvInfo = open(FvInfoFileName, 'r').read()
if GenFdsGlobalVariable.LargeFileInFvFlags[-1]:
FFSGuid = GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID;
GenFdsGlobalVariable.GenerateFirmwareVolume(
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
FfsList=FfsFileList,
ForceRebase=self.FvForceRebase,
FileSystemGuid=FFSGuid
)
NewFvInfo = None
if os.path.exists (FvInfoFileName):
NewFvInfo = open(FvInfoFileName, 'r').read()
if NewFvInfo != None and NewFvInfo != OrigFvInfo:
FvChildAddr = []
AddFileObj = open(FvInfoFileName, 'r')
AddrStrings = AddFileObj.readlines()
AddrKeyFound = False
for AddrString in AddrStrings:
if AddrKeyFound:
#get base address for the inside FvImage
FvChildAddr.append (AddrString)
elif AddrString.find ("[FV_BASE_ADDRESS]") != -1:
AddrKeyFound = True
AddFileObj.close()
if FvChildAddr != []:
# Update Ffs again
for FfsFile in self.FfsList :
FileName = FfsFile.GenFfs(MacroDict, FvChildAddr, BaseAddress)
if GenFdsGlobalVariable.LargeFileInFvFlags[-1]:
FFSGuid = GenFdsGlobalVariable.EFI_FIRMWARE_FILE_SYSTEM3_GUID;
#Update GenFv again
GenFdsGlobalVariable.GenerateFirmwareVolume(
FvOutputFile,
[self.InfFileName],
AddressFile=FvInfoFileName,
FfsList=FfsFileList,
ForceRebase=self.FvForceRebase,
FileSystemGuid=FFSGuid
)
#
# Write the Fv contents to Buffer
#
FvFileObj = open ( FvOutputFile,'r+b')
GenFdsGlobalVariable.VerboseLogger( "\nGenerate %s FV Successfully" %self.UiFvName)
GenFdsGlobalVariable.SharpCounter = 0
Buffer.write(FvFileObj.read())
FvFileObj.seek(0)
# PI FvHeader is 0x48 byte
FvHeaderBuffer = FvFileObj.read(0x48)
# FV alignment position.
FvAlignmentValue = 1 << (ord (FvHeaderBuffer[0x2E]) & 0x1F)
# FvAlignmentValue is larger than or equal to 1K
if FvAlignmentValue >= 0x400:
if FvAlignmentValue >= 0x10000:
#The max alignment supported by FFS is 64K.
self.FvAli |
miguelgrinberg/flasky | config.py | Python | mit | 3,883 | 0 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments.

    Values are read from environment variables where available, with
    development-friendly defaults.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    # Outgoing mail (used for error reports and notifications).
    MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.googlemail.com')
    MAIL_PORT = int(os.environ.get('MAIL_PORT', '587'))
    MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS', 'true').lower() in \
        ['true', 'on', '1']
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
    # E-mail address that receives error reports (see ProductionConfig).
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    SSL_REDIRECT = False
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Record query timings so slow queries can be logged.
    SQLALCHEMY_RECORD_QUERIES = True
    # Pagination sizes for the blog views.
    FLASKY_POSTS_PER_PAGE = 20
    FLASKY_FOLLOWERS_PER_PAGE = 50
    FLASKY_COMMENTS_PER_PAGE = 30
    # Queries slower than this many seconds are reported.
    FLASKY_SLOW_DB_QUERY_TIME = 0.5
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app setup; overridden by subclasses.
        pass
class DevelopmentConfig(Config):
    """Local development: debug mode and a throwaway SQLite database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Unit-test settings: in-memory database and CSRF disabled."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite://'
    # Tests post forms directly, so CSRF tokens would only get in the way.
    WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
    """Production settings: real database plus error e-mails to the admin."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')

    @classmethod
    def init_app(cls, app):
        Config.init_app(app)

        # Forward unhandled application errors to the administrators by mail.
        import logging
        from logging.handlers import SMTPHandler
        username = getattr(cls, 'MAIL_USERNAME', None)
        credentials = None
        secure = None
        if username is not None:
            credentials = (username, cls.MAIL_PASSWORD)
            if getattr(cls, 'MAIL_USE_TLS', None):
                secure = ()
        mail_handler = SMTPHandler(
            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
            fromaddr=cls.FLASKY_MAIL_SENDER,
            toaddrs=[cls.FLASKY_ADMIN],
            subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
            credentials=credentials,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
    """Heroku deployment: production config plus dyno-specific tweaks."""
    # The DYNO environment variable is only present when running on Heroku.
    SSL_REDIRECT = bool(os.environ.get('DYNO'))

    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)

        # Trust the X-Forwarded-* headers set by Heroku's reverse proxy.
        try:
            from werkzeug.middleware.proxy_fix import ProxyFix
        except ImportError:
            from werkzeug.contrib.fixers import ProxyFix
        app.wsgi_app = ProxyFix(app.wsgi_app)

        # Heroku captures stderr, so send log records there.
        import logging
        from logging import StreamHandler
        stderr_handler = StreamHandler()
        stderr_handler.setLevel(logging.INFO)
        app.logger.addHandler(stderr_handler)
class DockerConfig(ProductionConfig):
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# log to stderr
import logging
from logging import StreamHandler
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandle | r(file_handler)
class UnixConfig(ProductionConfig):
    """Production config for classic Unix hosts: log through syslog."""

    @classmethod
    def init_app(cls, app):
        ProductionConfig.init_app(app)

        # Hand log records to the local syslog daemon.
        import logging
        from logging.handlers import SysLogHandler
        handler = SysLogHandler()
        handler.setLevel(logging.INFO)
        app.logger.addHandler(handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'heroku': HerokuCo | nfig,
'docker': DockerConfig,
'unix': UnixConfig,
'default': DevelopmentConfig
}
|
mackst/glm | glm/detail/func_geometric.py | Python | mit | 3,822 | 0.00314 | # -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014-2018 Shi Chi(Mack Stone)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from .func_exponential import *
if sys.version_info > (3, 0):
long = int
def length(x):
    """Returns the length of x, i.e., sqrt(x * x).

    Scalars yield their absolute value; Vec3/Vec4 yield the Euclidean norm.

    :param x: Floating-point vector types.

    .. seealso::

        `GLSL length man page <http://www.opengl.org/sdk/docs/manglsl/xhtml/length.xml>`_

        `GLSL 4.20.8 specification, section 8.5 Geometric Functions <http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf>`_"""
    # TODO: implement vec2 type
    if isinstance(x, float) or isinstance(x, int) or isinstance(x, long):
        # The length of a scalar is its absolute value.
        return abs(x)
    if isinstance(x, Vec3):
        return math.sqrt(x.x * x.x + x.y * x.y + x.z * x.z)
    if isinstance(x, Vec4):
        return math.sqrt(x.x * x.x + x.y * x.y + x.z * x.z + x.w * x.w)
    raise TypeError('unsupport type %s' % type(x))
def dot(x, y):
    """Returns the dot product of x and y, i.e., result = x * y.

    :param x: Floating-point vector types.

    .. seealso::

        `GLSL dot man page <http://www.opengl.org/sdk/docs/manglsl/xhtml/dot.xml>`_

        `GLSL 4.20.8 specification, section 8.5 Geometric Functions <http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf>`_"""
    # TODO: implement vec2
    if isinstance(x, float) or isinstance(x, int) or isinstance(x, long):
        # Scalar "dot product" degenerates to plain multiplication.
        return x * y
    if isinstance(x, Vec3) and isinstance(y, Vec3):
        prod = Vec3(x * y)
        return prod.x + prod.y + prod.z
    if isinstance(x, Vec4) and isinstance(y, Vec4):
        prod = Vec4(x * y)
        return (prod.x + prod.y) + (prod.z + prod.w)
    raise TypeError('unsupport type %s' % type(x))
def normalize(x):
"""Returns a vector in the same direction as x but with length of 1.
.. seealso::
`GLSL normalize man page <http://www.opengl.org/sdk/docs/manglsl/xhtml/normalize.xml>`_
`GLSL 4.20.8 specification, section 8.5 Geometric Functions <http://www.opengl.org/registry/doc/GLSLangSpec.4.20.8.pdf>`_"""
if isinstance(x, float) or isinstance(x, int) or isinstance(x, long):
ret | urn -1.0 if x < 0.0 else 1.0
#elif isinstance(x, Vec2):
| #sqr = x.x * x.x + x.y * x.y
#return x * inversesqrt(sqr)
elif isinstance(x, Vec3):
sqr = x.x * x.x + x.y * x.y + x.z * x.z
return x * inversesqrt(sqr)
elif isinstance(x, Vec4):
sqr = x.x * x.x + x.y * x.y + x.z * x.z + x.w * x.w
return x * inversesqrt(sqr)
|
TimDettmers/spodernet | examples/snli_verbose.py | Python | mit | 10,703 | 0.004298 | '''This models is an example for training a classifier on SNLI'''
from __future__ import print_function
from os.path import join
import nltk
import numpy as np
import os
import urllib
import zipfile
import sys
from spodernet.hooks import AccuracyHook, LossHook, ETAHook
from spodernet.preprocessing.pipeline import Pipeline
from spodernet.preprocessing.processors import AddToVocab, CreateBinsByNestedLength, SaveLengthsToState, ConvertTokenToIdx, StreamToHDF5, Tokenizer, NaiveNCharTokenizer
from spodernet.preprocessing.processors import JsonLoaderProcessors, DictKey2ListMapper, RemoveLineOnJsonValueCondition, ToLower
from spodernet.preprocessing.batching import StreamBatcher
from spodernet.utils.logger import Logger, LogLevel
from spodernet.utils.global_config import Config, Backends
from spodernet.utils.util import get_data_path
from torch.nn.modules.rnn import LSTM
from torch.autograd import Variable
import torch
Config.parse_argv(sys.argv)
np.set_printoptions(suppress=True)
class Net(torch.nn.Module):
    """Conditional-encoding BiLSTM classifier for SNLI.

    The premise ("input") and hypothesis ("support") are embedded, the
    premise is run through lstm1, and lstm1's final hidden state seeds
    lstm2 over the hypothesis; the concatenated last-step outputs of both
    LSTMs feed a linear layer scored with cross-entropy.
    """
    def __init__(self, num_embeddings, num_labels):
        super(Net, self).__init__()
        # Index 0 is reserved for padding and contributes zero vectors.
        self.emb = torch.nn.Embedding(num_embeddings, Config.embedding_dim, padding_idx=0)
        self.lstm1 = LSTM(Config.embedding_dim, Config.hidden_size, num_layers=1, batch_first=True, bias=True, dropout=Config.dropout, bidirectional=True)
        self.lstm2 = LSTM(Config.embedding_dim, Config.hidden_size, num_layers=1, batch_first=True, bias=True, dropout=Config.dropout, bidirectional=True)
        # 4 * hidden_size: two bidirectional outputs (2 directions each) concatenated.
        self.linear = torch.nn.Linear(Config.hidden_size*4, num_labels)
        self.loss = torch.nn.CrossEntropyLoss()
        self.pred = torch.nn.Softmax()
        # Preallocated initial hidden/cell states; leading dim 2 = num_directions.
        self.h0 = Variable(torch.zeros(2,Config.batch_size, Config.hidden_size))
        self.c0 = Variable(torch.zeros(2,Config.batch_size, Config.hidden_size))
        # NOTE(review): h1/c1 are allocated (and moved to GPU) but never
        # used in forward() -- confirm whether they can be removed.
        self.h1 = Variable(torch.zeros(2,Config.batch_size, Config.hidden_size))
        self.c1 = Variable(torch.zeros(2,Config.batch_size, Config.hidden_size))
        if Config.cuda:
            self.h0 = self.h0.cuda()
            self.c0 = self.c0.cuda()
            self.h1 = self.h1.cuda()
            self.c1 = self.c1.cuda()
    def forward(self, str2var):
        """Return (cross-entropy loss, predicted label indices) for a batch.

        Assumes str2var['input'] / ['support'] are (batch, seq) token-id
        matrices and the '*_length' entries hold true sequence lengths --
        TODO confirm against the StreamBatcher contract.
        """
        inp = str2var['input']
        sup = str2var['support']
        l1 = str2var['input_length']
        l2 = str2var['support_length']
        t = str2var['target']
        # Reuse the preallocated initial states; zero them between batches.
        self.h0.data.zero_()
        self.c0.data.zero_()
        inp_seq = self.emb(inp)
        sup_seq = self.emb(sup)
        # Encode the premise, then condition the hypothesis LSTM on its final state.
        out1, hid1 = self.lstm1(inp_seq, (self.h0, self.c0))
        out2, hid2 = self.lstm2(sup_seq, hid1)
        # Gather each sequence's output at its true last time step.
        outs1 = []
        outs2 = []
        for i in range(Config.batch_size):
            outs1.append(out1[i,l1.data[i]-1, :])
            outs2.append(out2[i,l2.data[i]-1, :])
        out1_stacked = torch.stack(outs1, 0)
        out2_stacked = torch.stack(outs2, 0)
        out = torch.cat([out1_stacked, out2_stacked], 1)
        projected = self.linear(out)
        loss = self.loss(projected, t)
        # Argmax over softmax probabilities = predicted class per example.
        max_values, argmax = torch.topk(self.pred(projected),1)
        return loss, argmax
def download_snli():
    '''Creates data and snli paths and downloads SNLI in the home dir.

    Returns:
        (archive, snli_dir): an open zipfile.ZipFile of the SNLI corpus and
        the directory the processed files should live in.
    '''
    home = os.environ['HOME']
    data_dir = join(home, '.data')
    snli_dir = join(data_dir, 'snli')
    snli_url = 'http://nlp.stanford.edu/projects/snli/snli_1.0.zip'

    if not os.path.exists(data_dir):
        os.mkdir(data_dir)

    if not os.path.exists(snli_dir):
        os.mkdir(snli_dir)

    zip_path = join(data_dir, 'snli_1.0.zip')
    if not os.path.exists(zip_path):
        print('Downloading SNLI...')
        # Bug fix: urllib.URLopener is Python-2 only (removed in Python 3),
        # which contradicted this file's py2/py3 intent. Use urlretrieve
        # with a compatibility import instead.
        try:
            from urllib.request import urlretrieve  # Python 3
        except ImportError:
            from urllib import urlretrieve  # Python 2
        urlretrieve(snli_url, zip_path)

    print('Opening zip file...')
    archive = zipfile.ZipFile(zip_path, 'r')

    return archive, snli_dir
def snli2json():
'''Preprocesses SNLI data and returns to spoder files'''
files = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl',
'snli_1.0_test.jsonl']
archive, snli_dir = download_snli()
new_files = ['train.data', 'dev.data', 'test.data']
names = ['train', 'dev', 'test']
if not os.path.exists(join(snli_dir, new_files[0])):
for name, new_name in zip(files, new_files):
print('Writing {0}...'.format(new_name))
archive = zipfile.ZipFile(join(data_dir, 'snli_1.0.zip'), 'r')
snli_file = archive.open(join('snli_1.0', name), 'r')
with open(join(snli_dir, new_name), 'w') as datafile:
for line in snli_file:
data = json.loads((line))
| if data['gold_label'] == '-':
continue
premise = data['sentence1']
hypothe | sis = data['sentence2']
target = data['gold_label']
datafile.write(
json.dumps([premise, hypothesis, target]) + '\n')
return [names, [join(snli_dir, new_name) for new_name in new_files]]
def preprocess_SNLI(delete_data=False):
# load data
#names, file_paths = snli2json()
#train_path, dev_path, test_path = file_paths
tokenizer = nltk.tokenize.WordPunctTokenizer()
zip_path = join(get_data_path(), 'snli_1.0.zip', 'snli_1.0')
file_paths = ['snli_1.0_train.jsonl', 'snli_1.0_dev.jsonl', 'snli_1.0_test.jsonl']
not_t = []
t = ['input', 'support', 'target']
# tokenize and convert to hdf5
# 1. Setup pipeline to save lengths and generate vocabulary
p = Pipeline('snli_example', delete_data)
p.add_path(join(zip_path, file_paths[0]))
p.add_line_processor(JsonLoaderProcessors())
p.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_token_processor(AddToVocab())
p.add_post_processor(SaveLengthsToState())
p.execute()
p.clear_processors()
p.state['vocab'].save_to_disk()
# 2. Process the data further to stream it to hdf5
p.add_sent_processor(ToLower())
p.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p.add_post_processor(ConvertTokenToIdx())
p.add_post_processor(CreateBinsByNestedLength('snli_train', min_batch_size=128))
state = p.execute()
# dev and test data
p2 = Pipeline('snli_example')
p2.copy_vocab_from_pipeline(p)
p2.add_path(join(zip_path, file_paths[1]))
p2.add_line_processor(JsonLoaderProcessors())
p2.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p2.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(SaveLengthsToState())
p2.execute()
p2.clear_processors()
p2.add_sent_processor(ToLower())
p2.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p2.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p2.add_post_processor(ConvertTokenToIdx())
p2.add_post_processor(StreamToHDF5('snli_dev'))
p2.execute()
p3 = Pipeline('snli_example')
p3.copy_vocab_from_pipeline(p)
p3.add_path(join(zip_path, file_paths[2]))
p3.add_line_processor(JsonLoaderProcessors())
p3.add_line_processor(RemoveLineOnJsonValueCondition('gold_label', lambda label: label == '-'))
p3.add_line_processor(DictKey2ListMapper(['sentence1', 'sentence2', 'gold_label']))
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), not_t)
p3.add_post_processor(SaveLengthsToState())
p3.execute()
p3.clear_processors()
p3.add_sent_processor(ToLower())
p3.add_sent_processor(Tokenizer(tokenizer.tokenize), t)
#p3.add_sent_processor(NaiveNCharTokenizer(3), no |
Shakajiub/RoboJiub | src/irc.py | Python | gpl-3.0 | 6,161 | 0.004545 | import re
import sys
import socket
from src.config import *
class IRC:
    def __init__(self, queue):
        # queue: thread-safe queue used to report chat/error events to the GUI.
        self.queue = queue
    def end_connection(self):
        """Send a goodbye message through irc and close the socket."""
        try:
            # Announce departure before leaving the channel.
            self.send_custom_message('goodbye')
            channel = get_channel()
            if channel:
                self.sock.send('PART {0}\r\n'.format(channel))
            self.sock.close()
        except Exception:
            # Report the failure on the GUI queue instead of crashing.
            self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
            self.queue.put(("irc.end_connection() - Could not close socket!", 'BG_error'))
def check_for_message(self, data, queue):
"""Parse data from twitch into a nice dictionary."""
try:
data = data.decode('utf-8').split(" :")
except UnicodeDecodeError:
# TODO - This generally means the message is spam, so /timeout the user
queue.put(("check_for_message() - Can't decode chat message!", 'BG_error'))
return False
if len(data) > 3:
return False
if len(data) > 1 and len(data[1].split(' ')) > 1:
msg_type = data[1].split(' ')[1]
msg_data = { 'type': msg_type }
params = data[0][1:].split(';')
for param in params:
p = param.split('=')
# NOTE - This gave an index error on something, possibly when the stream ended in a raid
msg_data[p[0]] = p[1]
if len(data) > 2:
msg_data['message'] = data[2][:-2]
return msg_data
else: print(data)
return False
def check_for_ping(self, data):
"""If given data starts with PING, send PONG + rest of the data back."""
if data.startswith('PING'):
try:
self.sock.send('PONG {0}\r\n'.format(data[5:]))
except Exception:
self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
self.queue.put(("irc.check_for_ping() - Could not respond with PONG!", 'BG_error'))
def check_login_status(self, data):
"""Return false if given data says login was unsuccessful. True otherwise."""
if re.match(r'^:(tmi\.twitch\.tv) NOTICE \* :Login unsuccessful\r\n$', data):
return False
return True
def send_message(self, message):
"""Try to send given message as PRIVMSG through the irc socket."""
if message == None:
return
try:
channel = get_channel()
if channel:
self.sock.send('PRIVMSG {0} : {1}\n'.format(channel, message.encode('utf-8')))
except Exception:
self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
self.queue.put(("irc.send_message() - Could not send message!", 'BG_error'))
def send_custom_message(self, message, data=None):
"""Try to send given message (if defined in the config)."""
config = get_config()
try:
if config['messages'][message]['enabled']:
msg = self.format_custom_message(message, config['messages'][message]['msg'], data)
self.queue.put(("[{0}]: {1}".format(get_botname(), msg), 'BG_chat'))
self.send_message(msg)
except KeyError:
self.queue.put(("irc.send_custom_message() - Could not send message '{0}'!".format(message), 'BG_error'))
def format_custom_message(self, message, text, data):
"""A | dd relevant data to custom bot messages."""
if message == "cheer" and len(data) == 2:
text = text. | format(user=data[0], bits=data[1])
elif message == "sub" and len(data) == 5:
text = text.format(user=data[0], streak=data[1], tier=data[2], plan=data[3], count=data[4])
elif message == "raid" and len(data) == 2:
text = text.format(user=data[0], raiders=data[1])
return text
def get_socket_object(self):
"""Connect and join irc channels as setup in the config. Return None or the socket."""
config = get_config()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
self.sock = sock
if not self.connect_socket(config):
return None
sock.settimeout(None)
channel = self.login_socket(config)
if not channel:
return None
self.sock.send('CAP REQ :twitch.tv/tags\r\n'.encode('utf-8'))
self.sock.send('CAP REQ :twitch.tv/commands\r\n'.encode('utf-8'))
self.sock.send('JOIN {0}\r\n'.format(channel))
self.send_custom_message('greeting')
return sock
def connect_socket(self, config):
"""Connect our socket as defined in the config. Return true on success."""
try:
self.sock.connect((config['irc']['server'], config['irc']['port']))
return True
except KeyError:
self.queue.put(("irc.connect_socket() - IRC config is corrupted!", 'BG_error'))
except Exception:
self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
self.queue.put(("irc.connect_socket() - Cannot connect to server!", 'BG_error'))
return False
def login_socket(self, config):
"""Login to the IRC channel as defined in the config. Return the channel name joined."""
try:
self.sock.send('USER {0}\r\n'.format(config['irc']['username']))
self.sock.send('PASS {0}\r\n'.format(config['irc']['oauth_password']))
self.sock.send('NICK {0}\r\n'.format(config['irc']['username']))
channel = config['irc']['channel']
except KeyError:
self.queue.put(("irc.login_socket() - IRC config is corrupted!", 'BG_error'))
return False
if self.check_login_status(self.sock.recv(1024)):
self.queue.put(("Login successful, joining channel {0}".format(channel), 'BG_success'))
else:
self.queue.put(("Login failed (possibly invalid oauth token)", 'BG_error'))
return False
return channel
|
alexlo03/ansible | lib/ansible/plugins/action/voss_config.py | Python | gpl-3.0 | 4,227 | 0.00071 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action plugin for ``voss_config``.

    Renders an optional ``src`` template on the controller, runs the
    module, writes any device backup returned by the module to a local
    ``backup`` directory, and strips private ``__x__`` keys from the result.
    """

    def run(self, tmp=None, task_vars=None):
        """Render ``src`` (if given), run the module, and handle backups."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        # Inside a role, resolve relative paths next to the role rather
        # than the playbook directory.
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write *contents* to ``backup/<host>_config.<timestamp>`` and return the path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        # Keep only the newest backup per host.
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # BUGFIX: close the file deterministically instead of relying on GC.
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Locate ``src``, render it with task vars, and substitute the
        rendered text back into ``self._task.args['src']``.

        Raises ValueError when the path given in ``src`` does not exist.
        """
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # BUGFIX: was urlsplit('src') (a string literal), so URL sources
        # were never detected and always went through path_dwim_relative.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # BUGFIX: was hasattr(self._task, "_block:") (stray colon), which
            # is always False and silently skipped dependent role paths.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
|
monitoringartist/zenoss-searcher | scripts/gen-source-wiki.py | Python | gpl-2.0 | 649 | 0.006163 | #!/usr/bin/env python
from pyquery import PyQuery as pq
from lxml import etree
import | urllib, json, ziconizing, collections
arr = {}
d = pq(url='http://wiki.zenoss.org/All_ZenPacks')
for a in d('.smw-column li a'):
name = a.text.strip() + ' ZenPack'
if name.startswith('ZenPack.'):
continue
url = 'http://wiki.zenoss.org' + a.get('href')
arr[name.replace(' ','-')] = {
'name': name,
'url': url,
'keywords': name.lower().replace(' ZenPack','').split(' '),
'icon': ziconizing.iconizing(name | , name.lower().split(' '))
}
oarr = collections.OrderedDict(sorted(arr.items()))
print json.dumps(oarr)
|
vi4m/django-dedal | dedal/__init__.py | Python | bsd-3-clause | 71 | 0 | _ | _version__ = '0.1.0'
from dedal.site import site

# Public API of the package: expose only the module-level `site` instance.
# (BUGFIX: the __all__ literal previously contained a stray ' | ' artifact.)
__all__ = ['site']
|
pombreda/httpplus | httpplus/tests/test_bogus_responses.py | Python | bsd-3-clause | 2,832 | 0.000353 | # Copyright 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# pylint: disable=protected-access,missing-docstring,too-few-public-methods,invalid-name,too-many-public-methods
"""Tests against malformed responses.
Server implementations that respond with only LF instead of CRLF have
been observed. Checking against ones that use only CR is a hedge
against that potential insanity.
"""
import unittest
import httpplus
# relative import to ease embedding the library
import util
class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):

    def bogusEOL(self, eol):
        """Run one request against a mock server whose responses use *eol*."""
        conn = httpplus.HTTPConnection('1.2.3.4:80')
        conn._connect()

        # Canned response the mock socket replays; the bogus line ending is
        # inserted wherever a well-behaved server would send CRLF.
        conn.sock.data = [
            'HTTP/1.1 200 OK%s' % eol,
            'Server: BogusServer 1.0%s' % eol,
            'Content-Length: 10',
            eol * 2,
            '1234567890',
        ]
        conn.request('GET', '/')

        wanted = ('GET / HTTP/1.1\r\n'
                  'Host: 1.2.3.4\r\n'
                  'accept-encoding: identity\r\n\r\n')
        self.assertEqual(('1.2.3.4', 80), conn.sock.sa)
        self.assertEqual(wanted, conn.sock.sent)
        self.assertEqual('1234567890', conn.getresponse().read())

    def testOnlyLinefeed(self):
        # LF-only servers have actually been observed in the wild.
        self.bogusEOL('\n')

    def testOnlyCarriageReturn(self):
        # CR-only is the hedge case described in the module docstring.
        self.bogusEOL('\r')
|
apache/incubator-airflow | airflow/api_connexion/schemas/provider_schema.py | Python | apache-2.0 | 1,452 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from marshmallow import Schema, fields
class ProviderSchema(Schema):
    """Serialization schema for a single installed provider package."""

    package_name = fields.String(required=True)  # distribution name of the provider
    description = fields.String(required=True)   # human-readable summary
    version = fields.String(required=True)       # installed version string
class ProviderCollection(NamedTuple):
    """A page of providers together with the overall total, for list endpoints."""

    # NOTE(review): despite the annotation, callers appear to pass provider
    # info objects rather than ProviderSchema instances -- confirm upstream.
    providers: List[ProviderSchema]
    # Total number of providers available (presumably not just this page).
    total_entries: int
class ProviderCollectionSchema(Schema):
    """Serialization schema for a collection of providers plus its total count."""

    providers = fields.List(fields.Nested(ProviderSchema))
    total_entries = fields.Int()
provider_collection_schema = ProviderCollectionSchema()
provider_schema = ProviderSchema()
|
glue-viz/echo | echo/qt/tests/test_connect_combo_selection.py | Python | mit | 3,424 | 0.000584 | import pytest
import numpy as np
from qtpy import QtWidgets
from echo.core import CallbackProperty
from echo.selection import SelectionCallbackProperty, ChoiceSeparator
from echo.qt.connect import connect_combo_selection
class Example(object):
    """Test fixture: ``a`` is a selection property whose choices are set
    per-instance (falling back to the second entry via default_index=1);
    ``b`` is a plain callback property used for the invalid-property test."""
    a = SelectionCallbackProperty(default_index=1)
    b = CallbackProperty()
def test_connect_combo_selection():
    """End-to-end check of connect_combo_selection: combo box and callback
    property stay in sync through selection changes, choice replacement,
    separators, empty choices and array-valued choices.

    BUGFIX: two statements in this test were corrupted by stray ' | '
    artifacts ('a | ssert', 'currentInde | x'), making the file unparsable.
    """

    t = Example()

    a_prop = getattr(type(t), 'a')
    a_prop.set_choices(t, [4, 3.5])
    a_prop.set_display_func(t, lambda x: 'value: {0}'.format(x))

    combo = QtWidgets.QComboBox()

    c1 = connect_combo_selection(t, 'a', combo)  # noqa

    assert combo.itemText(0) == 'value: 4'
    assert combo.itemText(1) == 'value: 3.5'
    assert combo.itemData(0).data == 4
    assert combo.itemData(1).data == 3.5

    combo.setCurrentIndex(1)
    assert t.a == 3.5

    combo.setCurrentIndex(0)
    assert t.a == 4

    combo.setCurrentIndex(-1)
    assert t.a is None

    t.a = 3.5
    assert combo.currentIndex() == 1

    t.a = 4
    assert combo.currentIndex() == 0

    with pytest.raises(ValueError) as exc:
        t.a = 2
    assert exc.value.args[0] == 'value 2 is not in valid choices: [4, 3.5]'

    t.a = None
    assert combo.currentIndex() == -1

    # Changing choices should change Qt combo box. Let's first try with a case
    # in which there is a matching data value in the new combo box
    t.a = 3.5
    assert combo.currentIndex() == 1
    a_prop.set_choices(t, (4, 5, 3.5))
    assert combo.count() == 3
    assert t.a == 3.5
    assert combo.currentIndex() == 2
    assert combo.itemText(0) == 'value: 4'
    assert combo.itemText(1) == 'value: 5'
    assert combo.itemText(2) == 'value: 3.5'
    assert combo.itemData(0).data == 4
    assert combo.itemData(1).data == 5
    assert combo.itemData(2).data == 3.5

    # Now we change the choices so that there is no matching data - in this case
    # the index should change to that given by default_index
    a_prop.set_choices(t, (4, 5, 6))
    assert t.a == 5
    assert combo.currentIndex() == 1
    assert combo.count() == 3
    assert combo.itemText(0) == 'value: 4'
    assert combo.itemText(1) == 'value: 5'
    assert combo.itemText(2) == 'value: 6'
    assert combo.itemData(0).data == 4
    assert combo.itemData(1).data == 5
    assert combo.itemData(2).data == 6

    # Finally, if there are too few choices for the default_index to be valid,
    # pick the last item in the combo
    a_prop.set_choices(t, (9,))
    assert t.a == 9
    assert combo.currentIndex() == 0
    assert combo.count() == 1
    assert combo.itemText(0) == 'value: 9'
    assert combo.itemData(0).data == 9

    # Now just make sure that ChoiceSeparator works
    separator = ChoiceSeparator('header')
    a_prop.set_choices(t, (separator, 1, 2))
    assert combo.count() == 3
    assert combo.itemText(0) == 'header'
    assert combo.itemData(0).data is separator

    # And setting choices to an empty iterable shouldn't cause issues
    a_prop.set_choices(t, ())
    assert combo.count() == 0

    # Try including an array in the choices
    a_prop.set_choices(t, (4, 5, np.array([1, 2, 3])))
def test_connect_combo_selection_invalid():
    """A plain CallbackProperty must be rejected with a TypeError."""
    state = Example()
    widget = QtWidgets.QComboBox()
    with pytest.raises(TypeError) as exc:
        connect_combo_selection(state, 'b', widget)
    assert exc.value.args[0] == 'connect_combo_selection requires a SelectionCallbackProperty'
|
eschleicher/flask_shopping_list | venv/lib/python3.4/site-packages/argh/compat.py | Python | mit | 2,834 | 0.000706 | # originally inspired by "six" by Benjamin Peterson
import inspect
import sys
# Py2/Py3 text-vs-bytes aliases: `text_type` is the native text type,
# `binary_type` the raw byte-string type; StringIO/BytesIO are the
# matching in-memory stream classes.
if sys.version_info < (3,0):
    text_type = unicode
    binary_type = str
    import StringIO
    # On Py2 a single StringIO class serves for both text and bytes.
    StringIO = BytesIO = StringIO.StringIO
else:
    text_type = str
    binary_type = bytes
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
def getargspec_permissive(func):
    """
    An `inspect.getargspec` with a relaxed sanity check to support Cython.

    Motivation:

        A Cython-compiled function is *not* an instance of Python's
        types.FunctionType. That is the sanity check the standard Py2
        library uses in `inspect.getargspec()`. So, an exception is raised
        when calling `argh.dispatch_command(cythonCompiledFunc)`. However,
        the CyFunctions do have perfectly usable `.func_code` and
        `.func_defaults` which is all `inspect.getargspec` needs.

        This function just copies `inspect.getargspec()` from the standard
        library but relaxes the test to a more duck-typing one of having
        both `.func_code` and `.func_defaults` attributes.
    """
    if inspect.ismethod(func):
        # Unwrap (un)bound methods to the underlying function (Py2 attribute).
        func = func.im_func

    # Py2 Stdlib uses isfunction(func) which is too strict for Cython-compiled
    # functions though such have perfectly usable func_code, func_defaults.
    if not (hasattr(func, "func_code") and hasattr(func, "func_defaults")):
        raise TypeError('{!r} missing func_code or func_defaults'.format(func))

    args, varargs, varkw = inspect.getargs(func.func_code)
    return inspect.ArgSpec(args, varargs, varkw, func.func_defaults)
if sys.version_info < (3,0):
getargspec = getargspec_permissive
else:
# in Python 3 the basic getargspec doesn't support keyword-only arguments
# and annotations and raises ValueError if they are discovered
getargspec = inspect.getfullargspec
class _PrimitiveOrderedDict(dict):
"""
A poor man's OrderedDict replacement for compatibility with Python 2.6.
Implements only the basic features. May easily break if non-overloaded
methods are used.
"""
def __init__(self, *args, **kwargs):
super(_PrimitiveOrderedDict, self).__init__(*args, **kwargs)
self._seq = []
def __setitem__(self, key, value):
super(_PrimitiveOrderedDict, self).__setitem__(key, value)
if key not in self._seq:
self._seq.append(key)
def __delitem__(self, key):
super(_PrimitiveOrderedDict, self).__delitem__(key)
idx = self._seq.index(key)
del self._seq[idx]
def __iter__(self):
return iter(self._seq)
def keys(self):
return list(self)
def values(self):
return [self[k] for k in self]
try:
from collections import OrderedDict
except ImportError:
OrderedDict = _PrimitiveOrderedDict
|
fsalamero/pilas | pilasengine/ejemplos/ejemplos_a_revisar/deslizador.py | Python | lgpl-3.0 | 842 | 0.003563 | import pilasengine
# Allows this example to run even if pilas has not been installed.
import sys
sys.path.insert(0, "..")

pilas = pilasengine.iniciar()
mono = pilas.actores.Mono(y=-100)

def cuando_cambia_escala(valor):
    # Slider delivers 0..1; scale the monkey up to 2x.
    mono.escala = valor * 2

deslizador_escala = pilas.interfaz.Deslizador(y=50)
deslizador_escala.conectar(cuando_cambia_escala)

def cuando_cambia_rotacion(valor):
    # Map the 0..1 slider value to a full 0..360 degree rotation.
    mono.rotacion = valor * 360

deslizador_rotacion = pilas.interfaz.Deslizador(y=100)
deslizador_rotacion.conectar(cuando_cambia_rotacion)

def cuando_cambia_posicion(valor):
    # Maps 0..1 to x positions between -200 and 200.
    # (BUGFIX: old comment claimed 400; also dropped a leftover
    # Python-2-only debug `print valor` statement.)
    mono.x = -200 + 400 * valor

deslizador_posicion = pilas.interfaz.Deslizador(y=150)
deslizador_posicion.conectar(cuando_cambia_posicion)

pilas.avisar("Usa el deslizador para modificar al mono.")
pilas.ejecutar()
|
e-koch/FilFinder | fil_finder/tests/setup_package.py | Python | mit | 202 | 0 | def get_package_data():
return {
_ASTROPY_PACKAGE_NAME_ + '.tests': | ['data/*.fits',
| 'data/*.hdf5',
]}
|
brysonreece/Stream | resources/site-packages/stream/socks.py | Python | gpl-3.0 | 27,176 | 0.003422 | """
SocksiPy - Python SOCKS module.
Version 1.5.0
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
===============================================================================
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
Modifications made by Anorov (https://github.com/Anorov)
-Forked and renamed to PySocks
-Fixed issue with HTTP proxy failure checking (same bug that was in the old ___recvall() method)
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
courtesy of e000 (https://github.com/e000): https://gist.github.com/869791#file_socksipyhandler.py
-Re-styled code to make it readable
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
-Improved exception handling and output
-Removed irritating use of sequence indexes, replaced with tuple unpacked variables
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
-Other general fixes
-Added clarification that the HTTP proxy connection method only supports CONNECT-style tunneling HTTP proxies
-Various small bug fixes
"""
__version__ = "1.5.1"
import socket
import struct
import io
from errno import EOPNOTSUPP, EINVAL, EAGAIN
from io import BytesIO
"""Had to comment out, it seem XBMC supports the io module, just not all of it."""
"""Unsure how this will affect SOCKS5 functionality"""
from collections import Callable
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
PROXY_TYPE_HTTP = HTTP = 3
PRINTABLE_PROXY_TYPES = {SOCKS4: "SOCKS4", SOCKS5: "SOCKS5", HTTP: "HTTP"}
_orgsocket = _orig_socket = socket.socket
class ProxyError(IOError):
    """Base class for all proxy failures.

    `socket_err` contains the original socket.error exception; when
    present, its text is appended to the displayed message.
    """

    def __init__(self, msg, socket_err=None):
        self.socket_err = socket_err
        self.msg = "{0}: {1}".format(msg, socket_err) if socket_err else msg

    def __str__(self):
        return self.msg


class GeneralProxyError(ProxyError):
    """Generic proxy failure not tied to one protocol step."""


class ProxyConnectionError(ProxyError):
    """Could not establish the TCP connection to the proxy server."""


class SOCKS5AuthError(ProxyError):
    """SOCKS5 authentication with the proxy failed."""


class SOCKS5Error(ProxyError):
    """SOCKS5 protocol-level error reported by the proxy."""


class SOCKS4Error(ProxyError):
    """SOCKS4 protocol-level error reported by the proxy."""


class HTTPError(ProxyError):
    """HTTP CONNECT tunneling error reported by the proxy."""
SOCKS4_ERRORS = { 0x5B: "Request rejected or failed",
0x5C: "Request rejected because SOCKS server cannot connect to identd on the client",
0x5D: "Request rejected because the client program and identd report different user-ids"
}
SOCKS5_ERRORS = { 0x01: "General SOCKS server failure",
0x02: "Connection not allowed by ruleset",
0x03: "Network unreachable",
0x04: "Host unreachable",
0x05: "Connection refused",
0x06: "TTL expired",
0x07: "Command not supported, or protocol error",
0x08: "Address type not supported"
}
DEFAULT_PORTS = { SOCKS4: 1080,
SOCKS5: 1080,
HTTP: 8080
}
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True, username=None, password=None):
    """
    set_default_proxy(proxy_type, addr[, port[, rdns[, username, password]]])

    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed. All parameters are as for socket.set_proxy().
    """
    # BUGFIX: guard addr like username/password; previously calling with the
    # documented default addr=None (e.g. to clear the proxy) raised
    # AttributeError on None.encode().
    socksocket.default_proxy = (proxy_type,
                                addr.encode() if addr else addr,
                                port, rdns,
                                username.encode() if username else None,
                                password.encode() if password else None)

# Alias kept for the original SocksiPy-style API.
setdefaultproxy = set_default_proxy
def get_default_proxy():
    """
    Returns the default proxy, set by set_default_proxy.
    """
    return socksocket.default_proxy

# Alias kept for the original SocksiPy-style API.
getdefaultproxy = get_default_proxy
def wrap_module(module):
    """
    Attempts to replace a module's socket library with a SOCKS socket.
    Must set a default proxy using set_default_proxy(...) first.

    This will only work on modules that import socket directly into the
    namespace; most of the Python Standard Library falls into this category.
    """
    # Guard clause: refuse to patch anything until a default proxy exists.
    if not socksocket.default_proxy:
        raise GeneralProxyError("No default proxy specified")
    module.socket.socket = socksocket

# Alias kept for the original SocksiPy-style API.
wrapmodule = wrap_module
def create_connection(dest_pair, proxy_type=None, proxy_addr=None,
                      proxy_port=None, proxy_username=None,
                      proxy_password=None, timeout=None):
    """create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object

    Like socket.create_connection(), but connects to proxy
    before returning the socket object.

    dest_pair - 2-tuple of (IP/hostname, port).
    **proxy_args - Same args passed to socksocket.set_proxy().
    timeout - Optional socket timeout value, in seconds.

    BUGFIX: the signature and docstring previously contained stray ' | '
    artifacts that made the module unparsable.
    """
    sock = socksocket()
    if isinstance(timeout, (int, float)):
        sock.settimeout(timeout)
    sock.set_proxy(proxy_type, proxy_addr, proxy_port,
                   proxy_username, proxy_password)
    sock.connect(dest_pair)
    return sock
class _BaseSocket(socket.socket):
    """Allows Python 2's "delegated" methods such as send() to be overridden
    """
    def __init__(self, *pos, **kw):
        _orig_socket.__init__(self, *pos, **kw)

        # Stash the original delegate methods, then remove the instance
        # attributes so normal class-level overriding works via the MRO.
        self._savedmethods = dict()
        for name in self._savenames:
            self._savedmethods[name] = getattr(self, name)
            delattr(self, name)  # Allows normal overriding mechanism to work

    # Populated by the module-level loop below; stays empty on interpreters
    # where these methods are plain functions on the class (Python 3).
    _savenames = list()
def _makemethod(name):
    # Build a forwarder that dispatches to the method saved in
    # _BaseSocket.__init__ (pre-deletion copy of the delegate).
    return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
for name in ("sendto", "send", "recvfrom", "recv"):
    method = getattr(_BaseSocket, name, None)

    # Determine if the method is not defined the usual way
    # as a function in the class.
    # Python 2 uses __slots__, so there are descriptors for each method,
    # but they are not functions.
    if not isinstance(method, Callable):
        _BaseSocket._savenames.append(name)
        setattr(_BaseSocket, name, _makemethod(name))
class socksocket(_BaseSocket):
"""socksocket([family[, type[, proto]]]) -> socket object
Open a SOCKS enabled socket. The parameters are the same as
those of the standard socket init. In order for SOCKS to work,
you must specify family=AF_INET and proto=0.
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
"""
default_proxy = None
"""Quick fix for proxy issue - brysonreece"""
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREA |
1flow/1flow | oneflow/core/admin/processor.py | Python | agpl-3.0 | 5,116 | 0 | # -*- coding: utf-8 -*-
u"""
Copyright 2015 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _ # , pgettext_lazy
from ..models.reldb import ( # NOQA
Processor,
ProcessingChain,
ChainedItem,
ChainedItemParameter,
ProcessingError,
ProcessorCategory,
)
from django import forms
from codemirror import CodeMirrorTextarea
class ProcessorAdminForm(forms.ModelForm):
""" Use CodeMirror widgets for code fields. """
class Meta:
model = Processor
widgets = {
'parameters': CodeMirrorTextarea(
mode='yaml',
addon_js=settings.CODEMIRROR_ADDONS_JS,
addon_css=settings.CODEMIRROR_ADDONS_CSS,
keymap=settings.CODEMIRROR_KEYMAP,
),
'requirements': CodeMirrorTextarea(
mode='shell',
addon_js=settings.CODEMIRROR_ADDONS_JS,
addon_css=settings.CODEMIRROR_ADDONS_CSS,
keymap=settings.CODEMIRROR_KEYMAP,
| ),
'accept_code': CodeMirrorTextarea(
mode='python',
addon_js=settings.CODEMIRROR_ADDONS_JS,
addon_css=settings.CODEMIRROR_ADDONS_CSS,
keymap=settings.CODEMIRROR_KEYMAP,
| ),
'process_code': CodeMirrorTextarea(
mode='python',
addon_js=settings.CODEMIRROR_ADDONS_JS,
addon_css=settings.CODEMIRROR_ADDONS_CSS,
keymap=settings.CODEMIRROR_KEYMAP,
),
}
class ProcessorAdmin(admin.ModelAdmin):
""" Processor admin class. """
form = ProcessorAdminForm
list_display = (
'id', 'name', 'slug',
'is_active', 'parent',
'short_description_en',
'user',
)
list_display_links = ('id', 'image', 'name', 'slug', )
list_filter = ('is_active', 'user', )
ordering = ('name', )
change_list_template = "admin/change_list_filter_sidebar.html"
change_list_filter_template = "admin/filter_listing.html"
search_fields = (
'name', 'url',
'short_description_en', 'short_description_fr',
)
fieldsets = (
(_(u'Main'), {
'classes': ('grp-collapse', ),
'fields': (
'name',
('slug', 'is_active', ),
('parameters', ),
),
}),
(_(u'Description'), {
'classes': ('grp-collapse grp-closed', ),
'fields': (
'short_description_en',
'description_en',
'short_description_fr',
'description_fr',
'short_description_nt',
'description_nt',
),
}),
(_(u'Code'), {
'classes': ('grp-collapse', ),
'fields': (
'requirements',
'accept_code',
'process_code',
),
}),
(_(u'Other (internals)'), {
'classes': ('grp-collapse grp-closed', ),
'fields': (
('source_uri', 'parent', 'user', ),
('duplicate_of', 'duplicate_status', ),
),
}),
)
# class ProcessingChainAdmin(admin.ModelAdmin):
# """ Processor admin class. """
# form = ProcessorAdminForm
# list_display = (
# 'id', 'name', 'is_active', 'parent',
# 'short_description_en',
# 'processor_type', 'needs_parameters',
# 'user',
# )
# list_display_links = ('id', 'image', 'name', 'slug', )
# list_filter = ('processor_type', 'needs_parameters', 'user', )
# ordering = ('name', )
# change_list_template = "admin/change_list_filter_sidebar.html"
# change_list_filter_template = "admin/filter_listing.html"
# search_fields = (
# 'name', 'url',
# 'short_description_en', 'short_description_fr',
# )
class ChainedItemAdmin(admin.ModelAdmin):
""" Chained item admin class. """
list_display = (
'id', 'chain', 'position', 'item',
'is_valid',
)
list_display_links = ('id', 'chain', 'position', )
list_filter = ('chain', 'is_valid', )
ordering = ('chain', 'position', )
change_list_template = "admin/change_list_filter_sidebar.html"
change_list_filter_template = "admin/filter_listing.html"
|
cpcloud/numba | numba/experimental/jitclass/base.py | Python | bsd-2-clause | 20,859 | 0.000192 | import inspect
import operator
import types as pytypes
import typing as pt
from collections import O | rderedDi | ct
from collections.abc import Sequence
from llvmlite import ir as llvmir
from numba import njit
from numba.core import cgutils, errors, imputils, types, utils
from numba.core.datamodel import default_manager, models
from numba.core.registry import cpu_target
from numba.core.typing import templates
from numba.core.typing.asnumbatype import as_numba_type
from numba.core.serialize import disable_pickling
from numba.experimental.jitclass import _box
##############################################################################
# Data model
class InstanceModel(models.StructModel):
    """Data model for a jitclass *instance* value: an NRT meminfo pointer
    plus a pointer to the instance's data struct (InstanceDataModel)."""
    def __init__(self, dmm, fe_typ):
        cls_data_ty = types.ClassDataType(fe_typ)
        # MemInfoPointer uses the `dtype` attribute to traverse for nested
        # NRT MemInfo. Since we handle nested NRT MemInfo ourselves,
        # we will replace provide MemInfoPointer with an opaque type
        # so that it does not raise exception for nested meminfo.
        dtype = types.Opaque('Opaque.' + str(cls_data_ty))
        members = [
            ('meminfo', types.MemInfoPointer(dtype)),
            ('data', types.CPointer(cls_data_ty)),
        ]
        super(InstanceModel, self).__init__(dmm, fe_typ, members)
class InstanceDataModel(models.StructModel):
def __init__(self, dmm, fe_typ):
clsty = fe_typ.class_type
members = [(_mangle_attr(k), v) for k, v in clsty.struct.items()]
super(InstanceDataModel, self).__init__(dmm, fe_typ, members)
default_manager.register(types.ClassInstanceType, InstanceModel)
default_manager.register(types.ClassDataType, InstanceDataModel)
default_manager.register(types.ClassType, models.OpaqueModel)
def _mangle_attr(name):
"""
Mangle attributes.
The resulting name does not startswith an underscore '_'.
"""
return 'm_' + name
##############################################################################
# Class object
_ctor_template = """
def ctor({args}):
return __numba_cls_({args})
"""
def _getargs(fn_sig):
"""
Returns list of positional and keyword argument names in order.
"""
params = fn_sig.parameters
args = []
for k, v in params.items():
if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:
args.append(k)
else:
msg = "%s argument type unsupported in jitclass" % v.kind
raise errors.UnsupportedError(msg)
return args
@disable_pickling
class JitClassType(type):
"""
The type of any jitclass.
"""
def __new__(cls, name, bases, dct):
if len(bases) != 1:
raise TypeError("must have exactly one base class")
[base] = bases
if isinstance(base, JitClassType):
raise TypeError("cannot subclass from a jitclass")
assert 'class_type' in dct, 'missing "class_type" attr'
outcls = type.__new__(cls, name, bases, dct)
outcls._set_init()
return outcls
def _set_init(cls):
"""
Generate a wrapper for calling the constructor from pure Python.
Note the wrapper will only accept positional arguments.
"""
init = cls.class_type.instance_type.methods['__init__']
init_sig = utils.pysignature(init)
# get postitional and keyword arguments
# offset by one to exclude the `self` arg
args = _getargs(init_sig)[1:]
cls._ctor_sig = init_sig
ctor_source = _ctor_template.format(args=', '.join(args))
glbls = {"__numba_cls_": cls}
exec(ctor_source, glbls)
ctor = glbls['ctor']
cls._ctor = njit(ctor)
def __instancecheck__(cls, instance):
if isinstance(instance, _box.Box):
return instance._numba_type_.class_type is cls.class_type
return False
def __call__(cls, *args, **kwargs):
# The first argument of _ctor_sig is `cls`, which here
# is bound to None and then skipped when invoking the constructor.
bind = cls._ctor_sig.bind(None, *args, **kwargs)
bind.apply_defaults()
return cls._ctor(*bind.args[1:], **bind.kwargs)
##############################################################################
# Registration utils
def _validate_spec(spec):
for k, v in spec.items():
if not isinstance(k, str):
raise TypeError("spec keys should be strings, got %r" % (k,))
if not isinstance(v, types.Type):
raise TypeError("spec values should be Numba type instances, got %r"
% (v,))
def _fix_up_private_attr(clsname, spec):
"""
Apply the same changes to dunder names as CPython would.
"""
out = OrderedDict()
for k, v in spec.items():
if k.startswith('__') and not k.endswith('__'):
k = '_' + clsname + k
out[k] = v
return out
def _add_linking_libs(context, call):
"""
Add the required libs for the callable to allow inlining.
"""
libs = getattr(call, "libs", ())
if libs:
context.add_linking_libs(libs)
def register_class_type(cls, spec, class_ctor, builder):
"""
Internal function to create a jitclass.
Args
----
cls: the original class object (used as the prototype)
spec: the structural specification contains the field types.
class_ctor: the numba type to represent the jitclass
builder: the internal jitclass builder
"""
# Normalize spec
if spec is None:
spec = OrderedDict()
elif isinstance(spec, Sequence):
spec = OrderedDict(spec)
# Extend spec with class annotations.
for attr, py_type in pt.get_type_hints(cls).items():
if attr not in spec:
spec[attr] = as_numba_type(py_type)
_validate_spec(spec)
# Fix up private attribute names
spec = _fix_up_private_attr(cls.__name__, spec)
# Copy methods from base classes
clsdct = {}
for basecls in reversed(inspect.getmro(cls)):
clsdct.update(basecls.__dict__)
methods, props, static_methods, others = {}, {}, {}, {}
for k, v in clsdct.items():
if isinstance(v, pytypes.FunctionType):
methods[k] = v
elif isinstance(v, property):
props[k] = v
elif isinstance(v, staticmethod):
static_methods[k] = v
else:
others[k] = v
# Check for name shadowing
shadowed = (set(methods) | set(props) | set(static_methods)) & set(spec)
if shadowed:
raise NameError("name shadowing: {0}".format(', '.join(shadowed)))
docstring = others.pop('__doc__', "")
_drop_ignored_attrs(others)
if others:
msg = "class members are not yet supported: {0}"
members = ', '.join(others.keys())
raise TypeError(msg.format(members))
for k, v in props.items():
if v.fdel is not None:
raise TypeError("deleter is not supported: {0}".format(k))
jit_methods = {k: njit(v) for k, v in methods.items()}
jit_props = {}
for k, v in props.items():
dct = {}
if v.fget:
dct['get'] = njit(v.fget)
if v.fset:
dct['set'] = njit(v.fset)
jit_props[k] = dct
jit_static_methods = {
k: njit(v.__func__) for k, v in static_methods.items()}
# Instantiate class type
class_type = class_ctor(
cls,
ConstructorTemplate,
spec,
jit_methods,
jit_props,
jit_static_methods)
jit_class_dct = dict(class_type=class_type, __doc__=docstring)
jit_class_dct.update(jit_static_methods)
cls = JitClassType(cls.__name__, (cls,), jit_class_dct)
# Register resolution of the class object
typingctx = cpu_target.typing_context
typingctx.insert_global(cls, class_type)
# Register class
targetctx = cpu_target.target_context
builder(class_type, typingctx, targetctx).register()
as_numba_type.register(cls, class_type.instance_type)
return cls
class ConstructorTemplate(templates.AbstractTemplate):
"""
Base class for jitclass constructor templates.
"""
def g |
M4rtinK/pyside-android | tests/QtGui/qmenuadd_test.py | Python | lgpl-2.1 | 518 | 0.003861 | # -*- coding: utf-8 -*-
''' Test the QMenu.addAction() method'''
import unittest
import sys
from PySide import QtGui
from helper import UsesQApplication
class QMenuAddAction(UsesQApplication):
def openFile(self, *args):
self.arg = args
def testQMenuAddAction(self):
fileMen | u = QtGui.QMenu("&File")
addNewAction = fileMenu.addAction("&Open...", self.openFile)
addNewAction.trigger()
self.assertEqual | s(self.arg, ())
if __name__ == '__main__':
unittest.main()
|
PnCevennes/projet_suivi | docs/conf.py | Python | gpl-3.0 | 8,065 | 0.006324 | # -*- coding: utf-8 -*-
#
# test documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 2 11:29:03 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'projet suivi'
copyright = u'2015, ff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warni | ngs as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options | available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'testdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'test.tex', u'test Documentation',
u'ff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'test', u'test Documentation',
[u'ff'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'test', u'test Documentation',
u'ff', 'test', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
longnow/panlex-tools | libpython/ben/icu_tools.py | Python | mit | 2,046 | 0.00782 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import icu
def all_glyphs(prop):
return [c.encode('utf-16', 'surrogatepass').decode('utf-16') for c in icu.UnicodeSetIterator(icu.UnicodeSet(r'[:{}:]'.format(prop)))]
def all_cps(prop):
return list(map(glyph_cp, all_glyphs(prop)))
def glyph_name(glyph, default=''):
if len(glyph) > 1:
raise | TypeError('glyph must be a string with length of 1')
elif len(glyph) == 0:
return default
else:
return icu.Char.charName(glyph)
def cp_glyph(cp, default=''):
try:
return chr(int(cp, 16))
except ValueError:
return default
def cp_name(cp, de | fault=''):
return glyph_name(cp_glyph(cp), default)
def glyph_cp(glyph):
return hex(ord(glyph))[2:]
class Rbnf:
def __init__(self, tag="spellout", locale="", rules=""):
if rules:
self._rbnf = icu.RuleBasedNumberFormat(rules)
else:
if tag.lower() not in {"spellout", "duration", "ordinal", "numbering_system"}:
raise ValueError("tag must be 'spellout', 'duration', 'ordinal', or 'numbering_system'")
self._rbnf = icu.RuleBasedNumberFormat(getattr(icu.URBNFRuleSetTag, tag.upper()), icu.Locale(locale))
def format(self, number):
return self._rbnf.format(number)
@property
def ruleset(self):
return self._rbnf.getDefaultRuleSetName()
@ruleset.setter
def ruleset(self, ruleset_name):
try:
self._rbnf.setDefaultRuleSet(ruleset_name)
except icu.ICUError:
raise ValueError(f"{ruleset_name} not a valid ruleset. See {self.__class__.__name__}.rulesets() for valid rulesets.")
def rulesets(self):
return {self._rbnf.getRuleSetName(i) for i in range(self._rbnf.getNumberOfRuleSetNames())}
def rules(self):
return self._rbnf.getRules()
def parse(self, string):
try:
return self._rbnf.parse(string)
except icu.ICUError:
raise ValueError("unable to parse string")
|
asgeir/old-school-projects | python/verkefni2/cpattern.py | Python | mit | 1,006 | 0.000994 | # https://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation
def selector(values, setBits):
maxBits = len(values)
def select(v):
out = []
for i in range(maxBits):
if (v & (1 << i)):
out.append(values[i])
return out
v = (2 ** setBits) - 1
endState = v << (maxBits - setBits)
yield select(v)
while v != endState:
t = (v | (v - 1)) + 1
v = t | ((((t & (-t % (1 << maxBits))) // (v & (-v % (1 << maxBits)))) >> 1) - 1)
yield select(v)
def normalize(perm):
ref = sorted(perm)
re | turn [ref.index(x) for x in perm]
def contains_pattern(perm, patt):
if len(patt) > len(perm):
return False
for p in selector(perm, len(patt)):
if normalize(p) == patt:
return True
return Fa | lse
if __name__ == '__main__':
print(contains_pattern(
[14, 12, 6, 10, 0, 9, 1, 11, 13, 16, 17, 3, 7, 5, 15, 2, 4, 8],
[3, 0, 1, 2]))
print(True)
|
xxd3vin/spp-sdk | opt/Python27/Lib/site-packages/numpy/distutils/__config__.py | Python | mit | 912 | 0.025219 | # This file is generated by Z:\Users\rgommers\Code\numpy\setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
blas_info={}
lapack_info={}
atlas_threads_info={}
blas_src_info={}
blas_opt_info={}
lapack_src_info={}
atlas_b | las_threads_info={}
lapack_opt_info={}
atlas_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
pr | int(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
|
JavaRabbit/CS496_capstone | bigquery/rest/labels_test.py | Python | apache-2.0 | 1,254 | 0 | # Copyright 2016, Google, Inc.
# Licensed under the Apache License, Versi | on 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Lic | ense for the specific language governing permissions and
# limitations under the License.
import os
from labels import label_dataset, label_table
PROJECT = os.environ['GCLOUD_PROJECT']
def test_label_dataset(capsys):
label_dataset(
'test_dataset',
'environment',
'test',
project_id=PROJECT)
out, _ = capsys.readouterr()
result = out.split('\n')[0]
assert 'Updated label "environment" with value "test"' in result
def test_label_table(capsys):
label_table(
'test_dataset',
'test_table',
'data-owner',
'my-team',
project_id=PROJECT)
out, _ = capsys.readouterr()
result = out.split('\n')[0]
assert 'Updated label "data-owner" with value "my-team"' in result
|
nanuxbe/microbit-files | button_motor_control.py | Python | mit | 571 | 0 | from microbit import *
while True:
if button_a.is_pressed():
pin1.write_digital(1)
pin2.write_d | igital(1)
pin0.write_digital(0)
pin8.write_digital(0)
display.show(Image.ARROW_N)
elif button_b.is_pressed():
pin0.write_digital(1)
pin8.write_digital(1)
pin1.write_digital(0)
pin2.write_digital(0)
display.show(Image.ARROW_S)
else:
pin | 0.write_digital(0)
pin8.write_digital(0)
pin1.write_digital(0)
pin2.write_digital(0)
display.show(Image.NO)
|
PureStorage-OpenConnect/purestorage-flocker-driver | tests/utils/__init__.py | Python | apache-2.0 | 66 | 0.015152 | # Copyright 2016 Pure Storage Inc.
# Se | e LIC | ENSE file for details. |
ali-salman/Aspose.Slides-for-Java | Plugins/Aspose-Slides-Java-for-Jython/asposeslides/WorkingWithSlidesInPresentation/Thumbnail.py | Python | mit | 4,221 | 0.005923 | from asposeslides import Settings
from com.aspose.slides import Presentation
from com.aspose.slides import SaveFormat
from javax import ImageIO
from java.io import File
class Thumbnail:
def __init__(self):
# Generating a Thumbnail from a Slide
self.create_thumbnail()
# Generating a Thumbnail from a Slide with User Defined Dimensions
self.create_thumbnail_custom_size()
# Generating a Thumbnail from a Slide in Notes Slides View
self.create_thumbnail_in_notes_slides_view()
# Generating a Thumbnail of User Defined Window from a Slide
self.create_thumbnail_of_user_defined_window()
def create_thumbnail(dataDir):
dataDir = Settings.dataDir + 'WorkingWithSlidesInPresentation/Thumbnail/'
# Instantiate Presentation class that represents the presentation file
pres = Presentation(dataDir + 'demo.pptx')
# Access the first slide
slide = pres.getSlides().get_Item(0)
# Create a full scale image
image = slide.getThumbnail()
# Save the image to disk in JPEG format
imageIO = ImageIO
imageIO.write(image, "jpeg", File(dataDir + "ContentBG_tnail.jpg"))
print "Created thumbnail, please check the output file." . PHP_EOL
def create_thumbnail_custom_size(dataDir):
# Instantiate Presentation class that represents the presentation file
pres = Presentation(dataDir + 'demo.pptx')
# Access the first slide
slide = pres.getSlides().get_Item(0)
# User defined dimension
desired_x = 1200
desired_y = 800
# Getting scaled value of X and Y
scale_x = (1.0 / java_values(pres.getSlideSize().getSize().getWidth())) * desired_x
scale_y = (1.0 / java_values(pres.getSlideSize().getSize().getHeight())) * desired_y
# Create a full scale image
image = slide.getThumbnail(scale_x, scale_y)
# Save the image to disk in JPEG format
imageIO = ImageIO()
imageIO.write(image, "jpeg", File(dataDir + "ContentBG_tnail.jpg"))
print "Created thumbnail with custom size, please check the output file.". PHP_EOL
def create_thumbnail_in_notes_slides_view(dataDir):
# Instantiate Presentation class that represents the presentation file
pres = Presentation(dataDir + 'demo.pptx')
# Access the first slide
slide = pres.getSlides().get_Item(0)
# User defined dimension
desired_x = 1200
desired_y = 800
# Getting scaled value of X and Y
scale_x = (1.0 / java_values(pres.getSlideSize().getSize().getWidth())) * desired_x
scale_y = (1.0 / java_values(pres.getSlideSize().getSize().getHeight())) * desired_y
# Create a full scale image
image = slide.getNotesSlide().getThumbnail(scale_x, scale_y)
# Save the image to disk in JPEG format
imageIO = ImageIO()
imageIO.write(image, "jpeg", File(dataDir + "ContentBG_tnail.jpg"))
print "Created thumbnail in notes slides view, please check the output file." . PHP_EOL
def create_thumbnail_of_user_defined_window(dataDir):
# Instantiate Presentation class that represents the presentation file
pres = Presentation(dataDir + 'demo.pptx')
# Access the first slide
slide = pres.getSlides().get_Item(0)
# Create a full scale image
image = slide.getThumbnail(1,1)
| # Getting the image of desired window inside generated slide thumnbnail
# BufferedImage window = image.getSubimage(windowX, windowY, windowsWidth, windowHe | ight)
window_image = image.getSubimage(100, 100, 200, 200)
# Save the image to disk in JPEG format
imageIO = ImageIO()
imageIO.write(image, "jpeg", File(dataDir + "ContentBG_tnail.jpg"))
print "Created thumbnail of user defined window, please check the output file." . PHP_EOL
if __name__ == '__main__':
Thumbnail() |
iwm911/plaso | plaso/parsers/winreg_plugins/mountpoints_test.py | Python | apache-2.0 | 2,420 | 0.001653 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the MountPoints2 Windows Registry plu | gin."""
imp | ort unittest
# pylint: disable=unused-import
from plaso.formatters import winreg as winreg_formatter
from plaso.lib import timelib_test
from plaso.parsers import winreg
from plaso.parsers.winreg_plugins import mountpoints
from plaso.parsers.winreg_plugins import test_lib
class MountPoints2PluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the MountPoints2 Windows Registry plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = mountpoints.MountPoints2Plugin()
def testProcess(self):
"""Tests the Process function."""
test_file = self._GetTestFilePath(['NTUSER-WIN7.DAT'])
key_path = self._plugin.REG_KEYS[0]
winreg_key = self._GetKeyFromFile(test_file, key_path)
event_generator = self._ParseKeyWithPlugin(self._plugin, winreg_key)
event_objects = self._GetEventObjects(event_generator)
self.assertEquals(len(event_objects), 5)
event_object = event_objects[0]
expected_timestamp = timelib_test.CopyStringToTimestamp(
'2011-08-23 17:10:14.960960')
self.assertEquals(event_object.timestamp, expected_timestamp)
regvalue = event_object.regvalue
self.assertEquals(regvalue.get('Share_Name'), r'\home\nfury')
expected_string = (
u'[{0:s}] Label: Home Drive Remote_Server: controller Share_Name: '
u'\\home\\nfury Type: Remote Drive Volume: '
u'##controller#home#nfury').format(key_path)
expected_string_short = u'{0:s}...'.format(expected_string[0:77])
self._TestGetMessageStrings(
event_object, expected_string, expected_string_short)
if __name__ == '__main__':
unittest.main()
|
deeplearning4j/deeplearning4j | libnd4j/include/graph/generated/nd4j/graph/UIAddName.py | Python | apache-2.0 | 1,253 | 0.00399 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: graph
import flatbuffers
class UIAddName(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsUIAddName(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = UIAddName()
x.Init(buf, n + offset)
return x
# UIAddName
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf | , pos)
# UIAddName
def NameIdx(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# UIAddName
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return | None
def UIAddNameStart(builder): builder.StartObject(2)
def UIAddNameAddNameIdx(builder, nameIdx): builder.PrependInt32Slot(0, nameIdx, 0)
def UIAddNameAddName(builder, name): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def UIAddNameEnd(builder): return builder.EndObject()
|
darren-wang/ks3 | keystone/resource/routers.py | Python | apache-2.0 | 1,239 | 0 | # Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwa | re
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI Routers for the Resource service."""
from keystone.common import router
from keystone.common import wsgi
from keystone.resource import controllers
class Routers(wsgi.RoutersBase):
def append_v3_routers | (self, mapper, routers):
routers.append(
router.Router(controllers.Domain(),
'domains', 'domain',
resource_descriptions=self.v3_resources))
routers.append(
router.Router(controllers.Project(),
'projects', 'project',
resource_descriptions=self.v3_resources))
|
jtyr/ansible-modules-core | cloud/openstack/os_object.py | Python | gpl-3.0 | 4,163 | 0.001681 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be give to the object. If omitted, operations will be on
the entire container
required: false
filename:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
- name: "Create a object named 'fstab' in the 'config' container"
os_object:
cloud: mordred
state: present
name: fstab
container: config
filename: /etc/fstab
- name: Delete a container called config and all of its contents
os_object:
cloud: rax-iad
state: absent
container: config
'''
def process_object(
cloud_obj, container, name, filename, container_access, **kwargs):
changed = False
container_obj = cloud_obj.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = cloud_obj.create_container(container)
changed = True
if cloud_obj.get_container_access(container) != container_access:
cloud_obj.set_container_access(container, container_access)
changed = True
if name:
if cloud_obj.is_object_stale(container, name, filename):
cloud_obj.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
changed= True
else:
cloud_obj.delete_container(container)
changed= True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params | )
changed = process_object(cloud, **module | .params)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Invoke the module entry point when Ansible executes this file directly.
if __name__ == "__main__":
    main()
|
jitka/weblate | weblate/trans/checks/data.py | Python | gpl-3.0 | 14,817 | 0 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
# We ignore some words which are usually not translated
SAME_BLACKLIST = frozenset((
'abc',
'accelerator',
'account',
'action',
'actions',
'active',
'add-ons',
'addons',
'address',
'admin',
'administrator',
'administration',
'africa',
'agenda',
'alarm',
'album',
'alias',
'aliases',
'aliasing',
'alt',
'altitude',
'amazon',
'android',
'antialias',
'antialiasing',
'api',
'applet',
'appliance',
'appliances',
'aptitude',
'area',
'array',
'artist',
'attribute',
'attribution',
'atom',
'audio',
| 'author',
'auto',
'autostart',
'authentication',
'avatar',
'backend',
'backspace',
'backup',
'badge',
'balance',
'baltic',
'bank',
'bar',
'baseball',
'battery',
'beg | in',
'bios',
'bit',
'bitcoin',
'bitcoins',
'bitmap',
'bitmaps',
'bitrate',
'block',
'blog',
'bluetooth',
'bool',
'boolean',
'boot',
'bootloader',
'branch',
'broadcast',
'browser',
'buffer',
'byte',
'bytes',
'bzip',
'bzip2',
'cable',
'cache',
'captcha',
'caps',
'cardinality',
'charset',
'charsets',
'chat',
'china',
'click',
'client',
'clipboard',
'club',
'code',
'collation',
'color',
'commit',
'component',
'components',
'compression',
'conductor',
'configuration',
'console',
'contact',
'contacts',
'context',
'control',
'cookie',
'copyright',
'creation',
'criteria',
'crypt',
'csd',
'csv',
'ctrl',
'cvs',
'cyrillic',
'dashboard',
'data',
'database',
'databases',
'date',
'datum',
'dbm',
'debian',
'debug',
'default',
'definition',
'del',
'delete',
'demo',
'description',
'design',
'designer',
'destination',
'detail',
'details',
'devscripts',
'dialog',
'ding',
'direction',
'disc',
'distance',
'distribution',
'distro',
'doc',
'docs',
'doctor',
'document',
'documentation',
'dollar',
'download',
'downloads',
'dpkg',
'dpi',
'drizzle',
'dummy',
'dump',
'e-mail',
'editor',
'eib',
'ellipsis',
'email',
'end',
'engine',
'engines',
'enter',
'enterprise',
'enum',
'error',
'escape',
'ethernet',
'exchange',
'excel',
'expert',
'explore',
'export',
'expression',
'extension',
'extra',
'extras',
'event',
'events',
'false',
'fame',
'fanfare',
'farm',
'fauna',
'fax',
'fedora',
'feeds',
'feet',
'file',
'files',
'filter',
'filters',
'finance',
'finalisation',
'fingerprint',
'firewall',
'firmware',
'fjord',
'flash',
'flattr',
'flora',
'font',
'format',
'forum',
'freemind',
'freeplane',
'frequency',
'full',
'fulltext',
'function',
'gammu',
'gas',
'general',
'genre',
'gentoo',
'geocache',
'geocaching',
'gettext',
'global',
'gnu',
'golf',
'google',
'gib',
'git',
'gpl',
'gps',
'gpx',
'graphic',
'graphics',
'grant',
'gtk',
'gzip',
'hack',
'hacks',
'hall',
'handle',
'handler',
'hardware',
'hash',
'hashed',
'headset',
'help',
'hmpf',
'home',
'homepage',
'hong',
'hook',
'horizontal',
'host',
'hosting',
'hostname',
'hostel',
'hotel',
'html',
'http',
'https',
'hut',
'hybrid',
'hyperlink',
'iban',
'icmp',
'icon',
'icons',
'ids',
'idea',
'ignore',
'irc',
'irda',
'illustration',
'image',
'imap',
'imei',
'imsi',
'import',
'inconsistent',
'index',
'india',
'indigo',
'individual',
'info',
'information',
'infrastructure',
'inline',
'innodb',
'input',
'ins',
'insert',
'install',
'installation',
'int',
'integer',
'interlingua',
'internet',
'international',
'intro',
'introduction',
'ion',
'ios',
'ip6tables',
'iptables',
'ipv6',
'irix',
'isbn',
'ismn',
'issn',
'isrc',
'item',
'items',
'jabber',
'java',
'join',
'joins',
'jpeg',
'jpg',
'karaoke',
'kernel',
'kib',
'kill',
'knoppix',
'kong',
'korfbal',
'label',
'labels',
'land',
'latex',
'latin',
'latitude',
'layout',
'ldif',
'leap',
'level',
'libgammu',
'linestring',
'link',
'links',
'linux',
'list',
'lithium',
'lithium',
'lock',
'local',
'locales',
'log',
'logcheck',
'login',
'logo',
'logos',
'longitude',
'lord',
'ltr',
'lua',
'lzma',
'magazine',
'magazines',
'mah',
'manager',
'mandrake',
'mandriva',
'manual',
'mail',
'mailbox',
'mailboxes',
'maildir',
'mailing',
'mako',
'markdown',
'master',
'max',
'maximum',
'media',
'mediawiki',
'menu',
'merge',
'mesh',
'message',
'messages',
'meta',
'metadata',
'metal',
'metre',
'metres',
'mib',
'micropayment',
'micropayments',
'microsoft',
'migration',
'mile',
'min',
'minimum',
'mint',
'minus',
'minute',
'minutes',
'mode',
'model',
'module',
'modules',
'monitor',
'mono',
'monument',
'motel',
'motif',
'mouse',
'mph',
'mysql',
'multiplayer',
'musicbottle',
'name',
'namecoin',
'namecoins',
'navigation',
'net',
'netfilter',
'network',
'neutral',
'nimh',
'node',
'none',
'normal',
'note',
'notes',
'notify',
'notification',
'null',
'num',
'number',
'numeric',
'obex',
'office',
'offline',
'ogg',
'online',
'opac',
'open',
'opendocument',
'openmaps',
'openpgp',
'openstreet',
'opensuse',
'openvpn',
'opera',
'operator',
'option',
'options',
'orange',
'orientation',
'osm',
'osmand',
'output',
'overhead',
'package',
'page',
'pager',
'pages',
'parameter',
'parameters',
'park',
'parking',
'partition',
'partitions',
'parser',
'party',
'password',
'pause',
'paypal',
'pdf',
'pdu',
'percent',
'perfume',
'personal',
'performance',
'php',
'phpmyadmin',
'pib',
'picasa',
'ping',
'pirate',
'pirates',
'pixel',
'pixels',
'placement',
'plan',
'playlist',
'plugin',
'plugins',
'plural',
'plus',
'png',
'podcast',
'podcasts',
'point',
'polygon',
'polymer',
'pool',
'port',
'portable',
'portrait',
'position',
'post',
'postgresql',
'posts',
'pre',
'pre-commit',
'prince',
'privacy',
'private',
'procedure',
'procedures',
'process',
'profiling',
'program',
'project',
'pr |
def slidingMaximum(A, B):
    """Return the maximum of every contiguous window of size B over A.

    Uses a monotonically decreasing deque of indices so each element is
    appended and removed at most once: O(len(A)) time, O(B) space.

    Fixes over the original:
    - the final window's maximum was appended as an *index*
      (``window[0]``) instead of the value ``A[window[0]]``;
    - a debug ``print`` ran on every iteration;
    - ``B > len(A)`` no longer raises IndexError (returns []).
    """
    from collections import deque
    if not A or B <= 1:
        # Windows of size <= 1 (or no data): every element is its own max.
        return A
    window = deque()  # candidate indices; their values are decreasing
    result = []
    for i in range(len(A)):
        # Drop indices that have slid out of the current window.
        while window and window[0] <= i - B:
            window.popleft()
        # Drop smaller values; they can never become a future maximum.
        while window and A[window[-1]] <= A[i]:
            window.pop()
        window.append(i)
        if i >= B - 1:
            result.append(A[window[0]])
    return result
# Demo driver. The original assigned two sample lists in a row, so the
# first was dead code; only the second is kept. print() form works under
# both Python 2 and 3.
A = [10, 9, 8, 7, 6, 5, 4]
B = 2
print(slidingMaximum(A, B))
dnguyen0304/clare | clare/clare/application/room_list_watcher/factories.py | Python | mit | 6,716 | 0.000596 | # -*- coding: utf-8 -*-
import collections
import logging
import sys
import selenium.webdriver
from selenium.webdriver.support.ui import WebDriverWait
from . import exceptions
from . import filters
from . import flush_strategies
from . import producers
from . import record_factories
from . import scrapers
from . import senders
from . import sources
from clare import common
from clare.common import automation
from clare.common import messaging
from clare.common import retry
from clare.common import utilities
class Factory(object):
    """Builds the fully decorated room-list scraper from configuration."""
    def __init__(self, properties):
        """
        Parameters
        ----------
        properties : collections.Mapping
        """
        self._properties = properties
    def create(self):
        """Create the scraper, wrapped inside-out with repetition,
        validation, retrying, record marshalling and orchestration.

        Returns
        -------
        scrapers.Orchestrating
            The outermost decorator; callers interact only with it.
        """
        # Construct the room list scraper.
        web_driver = selenium.webdriver.Chrome()
        wait_context = WebDriverWait(
            driver=web_driver,
            timeout=self._properties['scraper']['wait_context']['timeout'])
        scraper = scrapers.RoomList(web_driver=web_driver,
                                    wait_context=wait_context)
        # Include repeating.
        # This should be composed before validation so that validation
        # occurs each time instead of only once.
        scraper = scrapers.Repeating(scraper=scraper)
        # Include validation.
        wait_context = WebDriverWait(
            web_driver,
            timeout=self._properties['scraper']['validator']['wait_context']['timeout'])
        validator = automation.validators.PokemonShowdown(
            wait_context=wait_context)
        scraper = scrapers.Validating(scraper=scraper, validator=validator)
        # Include retrying.
        stop_strategy = retry.stop_strategies.AfterAttempt(
            maximum_attempt=self._properties['scraper']['retry_policy']['stop_strategy']['maximum_attempt'])
        wait_strategy = retry.wait_strategies.Fixed(
            wait_time=self._properties['scraper']['retry_policy']['wait_strategy']['wait_time'])
        logger = logging.getLogger(
            name=self._properties['scraper']['retry_policy']['messaging_broker']['logger']['name'])
        messaging_broker_factory = retry.messaging.broker_factories.Logging(
            logger=logger)
        messaging_broker = messaging_broker_factory.create(
            event_name='ROOM_LIST_SCRAPE')
        policy = retry.PolicyBuilder() \
            .with_stop_strategy(stop_strategy) \
            .with_wait_strategy(wait_strategy) \
            .continue_on_exception(automation.exceptions.ConnectionLost) \
            .continue_on_exception(exceptions.InitializationFailed) \
            .continue_on_exception(exceptions.ExtractFailed) \
            .with_messaging_broker(messaging_broker) \
            .build()
        scraper = scrapers.Retrying(scraper=scraper, policy=policy)
        # Include record marshalling.
        time_zone = common.utilities.TimeZone.from_name(
            name=self._properties['time_zone']['name'])
        record_factory = record_factories.RecordFactory(
            queue_name=self._properties['queue']['name'],
            time_zone=time_zone)
        scraper = scrapers.RecordMarshallingDecorator(scraper=scraper,
                                                      factory=record_factory)
        # Include orchestration.
        logger = logging.getLogger(
            name=self._properties['scraper']['logger']['name'])
        scraper = scrapers.Orchestrating(scraper=scraper, logger=logger)
        return scraper
    def __repr__(self):
        repr_ = '{}(properties={})'
        return repr_.format(self.__class__.__name__, self._properties)
class Producer(object):
    """Assembles a messaging producer around the scraper source.

    Fix: restored tokens garbled by extraction artifacts
    (``senders.L | ogging`` and ``dependen | cies``) and renamed the local
    loop variable that shadowed the ``filter`` builtin.
    """
    def __init__(self, properties, sender):
        """
        Parameters
        ----------
        properties : collections.Mapping
        sender : clare.common.messaging.producer.senders.Sender
        """
        self._factory = Factory(properties=properties)
        self._properties = properties
        self._sender = sender
    def create(self):
        """Build the producer from its dependencies and wrap it with
        logging orchestration."""
        # Construct the producer.
        dependencies = self.create_dependencies()
        builder = messaging.producer.builders.Builder() \
            .with_source(dependencies['source']) \
            .with_sender(dependencies['sender'])
        for message_filter in dependencies['filters']:
            builder = builder.with_filter(message_filter)
        producer = builder.build()
        # Include orchestration.
        logger = logging.getLogger(name=self._properties['logger']['name'])
        producer = producers.OrchestratingProducer(producer=producer,
                                                   logger=logger)
        return producer
    def create_dependencies(self):
        """
        Returns
        -------
        dict
            Keys: 'source', 'sender', 'filters'.
        """
        dependencies = dict()
        # Construct the source (the decorated scraper, buffered).
        scraper = self._factory.create()
        source = scrapers.BufferingSourceAdapter(
            scraper=scraper,
            url=self._properties['scraper']['url'])
        dependencies['source'] = source
        # Construct the sender, wrapped with logging.
        logger = logging.getLogger(
            name=self._properties['sender']['logger']['name'])
        sender = senders.Logging(sender=self._sender, logger=logger)
        dependencies['sender'] = sender
        # Construct the filters.
        dependencies['filters'] = list()
        # Deduplicate records; the filter flushes itself after a
        # configured duration so rooms can reappear later.
        countdown_timer = utilities.timers.CountdownTimer(
            duration=self._properties['filter']['flush_strategy']['duration'])
        after_duration = flush_strategies.AfterDuration(
            countdown_timer=countdown_timer)
        no_duplicate = filters.NoDuplicate(flush_strategy=after_duration)
        dependencies['filters'].append(no_duplicate)
        return dependencies
    def __repr__(self):
        repr_ = '{}(properties={}, sender={})'
        return repr_.format(self.__class__.__name__,
                            self._properties,
                            self._sender)
class CommandLineArguments(Producer):
    """Producer variant that reads room names from the command line
    instead of scraping them live."""
    def create_dependencies(self):
        """Replace the scraper source with a deque of sys.argv values."""
        deps = super(CommandLineArguments, self).create_dependencies()
        # Every positional argument becomes one buffered record.
        argument_buffer = collections.deque(sys.argv[1:])
        zone = common.utilities.TimeZone.from_name(
            name=self._properties['time_zone']['name'])
        factory = record_factories.RecordFactory(
            queue_name=self._properties['queue']['name'],
            time_zone=zone)
        deps['source'] = sources.Deque(deque=argument_buffer,
                                       record_factory=factory)
        return deps
|
dennereed/paleoanthro | dissertations/tests.py | Python | gpl-3.0 | 632 | 0.001582 | from django.test import TestCase
from models import Dissertation
class DisertationModelTests(TestCase):
    """Unit tests for the Dissertation model.

    Fix: keyword arguments corrupted by extraction artifacts
    (``| author_last_name`` and ``titl | e``) restored.
    """
    def test_dissertation_instance_creation(self):
        """Creating a Dissertation with only required fields must add
        exactly one row."""
        dissertation_starting_count = Dissertation.objects.count()
        # Test object creation with required fields only
        Dissertation.objects.create(
            author_last_name="Test",
            author_given_names="Ima",
            contact_email="imatest@someplace.edu",
            title="I Founda Cool Fossil And Said Somthing About It",
            year=2014,
        )
        self.assertEqual(Dissertation.objects.count(), dissertation_starting_count+1)
|
gngrwzrd/gity | python/__old/tags.py | Python | gpl-3.0 | 1,125 | 0.026667 | # Copyright Aaron Smith 2009
#
# This file is part of Gity.
#
# Gity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gity. If not, see <http://www.gnu.org/licenses/>.
from _util import *
# Python 2 helper script invoked by the Gity host application; exit
# code 84 signals failure, stdout carries the JSON result.
try:
	import re,os,subprocess,simplejson as json
except Exception,e:
	sys.stderr.write(str(e))
	exit(84)
try:
	# _argv supplies `options` (parsed CLI flags, including options.git).
	from _argv import *
	command = "%s %s" % (options.git,"tag -l")
	rcode,stout,sterr=run_command(command)
	rcode_for_git_exit(rcode,sterr)
	# Collect tag names (letters, digits, dots) and emit them as JSON.
	res=re.findall("([a-zA-Z0-9\.]+)",stout)
	sys.stdout.write(json.dumps(res))
	exit(0)
except Exception, e:
	sys.stderr.write("The tags command threw this error: " + str(e))
	exit(84)
mdrobisch/roseguarden | server/app/serializers.py | Python | gpl-3.0 | 5,422 | 0.011435 | __author__ = 'drobisch'
from models import User, Action
from marshmallow import Schema, fields, post_load, post_dump
import datetime
class UserListForSupervisorsSerializer(Schema):
    """User fields exposed in supervisor-facing user listings (omits
    registration/login timestamps shown to admins)."""
    class Meta:
        fields = ("id", "email", "firstName", "lastName", "phone", "role", "licenseMask", "keyMask", "lastBudgetUpdateDate",
                 "association", "accessDateStart", "accessDateEnd", "lastAccessDaysUpdateDate",
                 "accessTimeStart", "accessTimeEnd", "lastSyncDateTime", "accessType", "accessDaysMask", "accessDayCounter",
                 "budget", "cardID", "accessDayCyclicBudget")
class UserListSerializer(Schema):
    """Full user representation for the administrative user list."""
    class Meta:
        fields = ("id", "email", "firstName", "lastName", "phone", "role", "licenseMask", "keyMask", "lastBudgetUpdateDate", "lastAccessDateTime",
                 "association", "registerDateTime", "lastLoginDateTime", "accessDateStart", "accessDateEnd", "lastAccessDaysUpdateDate",
                 "accessTimeStart", "accessTimeEnd", "lastSyncDateTime", "accessType", "accessDaysMask", "accessDayCounter",
                 "budget", "cardID", "accessDayCyclicBudget")
class AdminsListSerializer(Schema):
    """Minimal contact fields for listing administrators."""
    class Meta:
        fields = ("email", "firstName", "lastName", "phone", "role", "association")
class SettingsListSerializer(Schema):
    """Key/value application settings (typed by the `type` field)."""
    class Meta:
        fields = ("id", "name", "value", "type")
class StatisticListSerializer(Schema):
    """Statistic definitions with display config and up to 8 series names."""
    class Meta:
        fields = ("id", "statId", "name", "statType", "binningCount", "seriesCount", "description", "displayConfig", "seriesName1", "seriesName2", "seriesName3", "seriesName4", "seriesName5", "seriesName6", "seriesName7", "seriesName8")
class StatisticEntryListSerializer(Schema):
    """One statistic data point (a bin of one series in one month).

    Fix: "statId" was listed twice in the fields tuple; the redundant
    duplicate is removed (marshmallow serializes each field once anyway).
    """
    class Meta:
        fields = ("id", "statId", "month", "year", "binningId", "series", "label", "value")
class UserSerializer(Schema):
    """Single-user representation; cardIDAssigned replaces the raw cardID
    used by the list serializers."""
    class Meta:
        fields = ("id", "email", "firstName", "lastName", "phone", "role", "licenseMask", "keyMask", "lastBudgetUpdateDate", "lastAccessDateTime",
                 "association", "registerDateTime", "lastLoginDateTime", "accessDateStart", "accessDateEnd", "lastAccessDaysUpdateDate",
                 "accessTimeStart", "accessTimeEnd", "lastSyncDateTime", "accessType", "accessDaysMask", "accessDayCounter",
                 "budget", "cardIDAssigned", "accessDayCyclicBudget")
    # Kept for reference: disabled envelope/deserialization hooks.
    #@post_dump(pass_many=True)
    #def wrap_if_many(self, data, many=False):
    #    if many:
    #        return {'userList': data}
    #    return data
    #@post_load
    #def make_user(self, data):
    #    result = User("testmail","passwor","name","kasdk")
    #    return result
class UserSyncSerializer(Schema):
    """Full user payload exchanged during node synchronisation, including
    card/crypto material; loading rebuilds a User model instance.

    Fixes: `class` keyword corrupted by an extraction artifact
    (``cla | ss``); removed the duplicate "cardID" entry in the fields
    tuple.
    """
    class Meta:
        fields = ("id", "email", "firstName", "lastName", "phone", "role", "licenseMask", "keyMask", "lastAccessDateTime",
                 "association", "registerDateTime", "lastLoginDateTime", "lastSyncDateTime", "lastAccessDaysUpdateDate",
                 "accessDateStart", "accessDateEnd", "lastBudgetUpdateDate", "accessDayCyclicBudget",
                 "accessTimeStart", "accessTimeEnd", "accessType", "accessDaysMask", "accessDayCounter",
                 "budget", "cardID", "password", "syncMaster", "active", "cardAuthBlock", "cardAuthSector",
                 "cardSecret", "cardAuthKeyA", "cardAuthKeyB")
    @post_load
    def make_user(self, data):
        """Rebuild a User from the deserialized sync dict."""
        firstName = data['firstName']
        lastName = data['lastName']
        email = data['email']
        password = data['password']
        # create base user, then apply every synced attribute
        user = User(email, password, firstName, lastName)
        user.updateUserFromSyncDict(data)
        user.id = data['id']
        # id == -1 marks "not yet persisted"; None lets the ORM assign one.
        if user.id == -1:
            user.id = None
        return user
class SessionInfoSerializer(Schema):
    """Minimal session payload: the user's id and role."""
    class Meta:
        fields = ("id", "role")
class RfidTagInfoSerializer(Schema):
    """RFID tag descriptor: assigned-user label plus the tag identifier."""
    userInfo = fields.String()
    tagId = fields.String()
class DoorSerializer(Schema):
    """Door/node descriptor (keyMask gates which users may open it)."""
    class Meta:
        fields = ("id", "name", "displayName", "keyMask", "address", "local")
class LogSerializer(Schema):
    """(De)serializes log entries; loading produces an Action model."""
    class Meta:
        fields = ("id", "date", "nodeName", "userName", "userMail", "authType", "authInfo", "logText", "logType", "logLevel", "rollbackPoint", "synced", "action", "actionParameter")
    @post_load
    def make_action(self, data):
        """Rebuild an Action from the deserialized dict.

        Renamed from the copy-pasted ``make_user``: this hook builds an
        Action, not a User. The name is only registered through
        @post_load, so the rename is invisible to callers.
        """
        # Only the first 19 chars are parsed: 'YYYY-MM-DDTHH:MM:SS'.
        # NOTE(review): any fractional seconds / timezone suffix is
        # discarded — confirm all nodes serialize naive timestamps.
        date = datetime.datetime.strptime(data['date'][:19], '%Y-%m-%dT%H:%M:%S')
        nodeName = data['nodeName']
        userName = data['userName']
        userMail = data['userMail']
        authType = data['authType']
        authInfo = data['authInfo']
        logText = data['logText']
        logType = data['logType']
        logLevel = data['logLevel']
        rollbackPoint = data['rollbackPoint']
        action = data['action']
        actionParameter = data['actionParameter']
        action = Action(date, nodeName, userName, userMail, logText, logType,
                        logLevel, authType, authInfo, action, actionParameter,
                        rollbackPoint)
        return action
#class User_Serializer (Resource):
# @marshal_with(parameter_marshaller)
# def get(self, user_id):
# entity = User.query.get(user_id)
# return entity
|
gimli-org/gimli | doc/examples/3_dc_and_ip/plot_04_ert_2_5d_potential.py | Python | apache-2.0 | 7,952 | 0.004276 | #!/usr/bin/env python
# encoding: utf-8
r"""
Geoelectrics in 2.5D
--------------------
This example shows geoelectrical (DC resistivity) forward modelling in 2.5 D, i.e.
a 2D conductivity distribution and 3D point sources, to illustrate the modeling level.
For ready-made ERT forward simulations in practice, please refer to the example on ERT
modeling and inversion using the ERTManager.
"""
###############################################################################
# Let us start with the governing partial differential equation of Poisson type
#
# .. math::
#
# \nabla\cdot(\sigma\nabla u)=-I\delta(\vec{r}-\vec{r}_{\text{s}}) \in R^3
#
# The source term (point electrode) is three-dimensional, but the distribution
# of the electrical # conductivity :math:`\sigma(x,y)` should be 2D so we apply
# a Fourier cosine transform from :math:`u(x,y,z) \mapsto u(x,k,z)` with the
# wave number :math:`k`. (:math:`D^{(a)}(u(x,y,z)) \mapsto i^{|a|}k^a u(x,z)`)
#
# .. math::
#
# \nabla\cdot( \sigma \nabla u ) - \sigma k^2 u
# &=-I\delta(\vec{r}-\vec{r}_{\text{s}}) \in R^2 \\
# \frac{\partial }{\partial x}\left(\cdot \sigma \frac{\partial u}{\partial x}\right) +
# \frac{\partial }{\partial z}\left(\cdot\sigma \frac{\partial u}{\partial z}\right) -
# \sigma k^2 u & =
# -I\delta(x-x_{\text{s}})\delta(z-z_{\text{s}}) \in R^2 \\
# \frac{\partial u}{\partial \vec{n}} & = 0 \quad\text{at the Surface}\quad (z=0) \\
# \frac{\partial u}{\partial \vec{n}} & = a u \quad\text{in the Subsurface}\quad (z<0)
#
import matplotlib
import numpy as np
import pygimli as pg
from pygimli.viewer.mpl import drawStreams
###############################################################################
# We know the exact solution by analytical formulas:
#
# .. math::
#
# u = \frac{1}{2\pi\sigma} \cdot (K_0(\|r-r^+_s\| k)+K_0(\|r-r^-_s\| k))
#
# with K0 being the Bessel function of first kind, and the normal and mirror
# sources r+ and r-. We define a function for it
def uAnalytical(p, sourcePos, k, sigma=1):
    """Calculates the analytical solution for the 2.5D geoelectrical problem.
    Solves the 2.5D geoelectrical problem for one wave number k.
    It calculates the normalized (for injection current 1 A and sigma=1 S/m)
    potential at position p for a current injection at position sourcePos.
    Injection at the subsurface is recognized via mirror sources along the
    surface at depth=0.
    Parameters
    ----------
    p : pg.Pos
        Position for the sought potential
    sourcePos : pg.Pos
        Current injection position.
    k : float
        Wave number
    sigma : float, optional
        Electrical conductivity (default 1 S/m).
    Returns
    -------
    u : float
        Solution u(p); 0 when p coincides (numerically) with a source.
    """
    r1A = (p - sourcePos).abs()
    # Mirror on surface at depth=0
    r2A = (p - pg.Pos([1.0, -1.0])*sourcePos).abs()
    if r1A > 1e-12 and r2A > 1e-12:
        # Sum of Bessel K0 terms for the real and the mirrored source.
        return 1 / (2.0 * np.pi) * 1/sigma * \
            (pg.math.besselK0(r1A * k) + pg.math.besselK0(r2A * k))
    else:
        # The potential is singular at an electrode; return 0 there.
        return 0.
###############################################################################
# We assume the so-called mixed boundary conditions (Dey & Morrison, 1979).
#
# .. math::
#
# \sigma k \frac{{\bf r}\cdot {\bf n}}{{|r|}} \frac{K_1(|r-r_s|k)}{K_0(|r-r_s|k)}
#
def mixedBC(boundary, userData):
    """Mixed boundary conditions.

    Define the derivative of the analytical solution regarding the outer normal
    direction :math:`\vec{n}`. So we can define the values for mixed boundary
    condition :math:`\frac{\partial u}{\partial \vec{n}} = -au`
    for the boundaries on the subsurface.

    Returns the Robin coefficient alpha; returning just alpha is
    equivalent to returning the triple [alpha, 1.0, 0.0] accepted by the
    solver (the original kept that alternative as unreachable code after
    the return, removed here).
    """
    ### ignore surface boundaries for wildcard boundary condition
    if boundary.norm()[1] == 1.0:
        return 0
    sourcePos = userData['sourcePos']
    k = userData['k']
    sigma = userData['s']
    r1 = boundary.center() - sourcePos
    # Mirror on surface at depth=0
    r2 = boundary.center() - pg.Pos(1.0, -1.0) * sourcePos
    r1A = r1.abs()
    r2A = r2.abs()
    n = boundary.norm()
    # Guard the singular case of a boundary touching a source position.
    if r1A <= 1e-12 or r2A <= 1e-12:
        return 0.0
    alpha = sigma * k * ((r1.dot(n)) / r1A * pg.math.besselK1(r1A * k) +
                         (r2.dot(n)) / r2A * pg.math.besselK1(r2A * k)) / \
        (pg.math.besselK0(r1A * k) + pg.math.besselK0(r2A * k))
    return alpha
###############################################################################
# We assemble the right-hand side (rhs) for the singular current term by hand
# since this cannot be done efficiently by `pg.solve` yet. We basically search
# for the cell containing the source and project the point using its shape
# functions `N`.
#
def rhsPointSource(mesh, source):
    """Define function for the current source term.
    :math:`\delta(x-pos), \int f(x) \delta(x-pos)=f(pos)=N(pos)`
    Right hand side entries will be shape functions(pos)
    """
    rhs = pg.Vector(mesh.nodeCount())
    # Distribute the unit current onto the nodes of the cell containing
    # the source, weighted by the shape functions evaluated at `source`.
    cell = mesh.findCell(source)
    rhs.setVal(cell.N(cell.shape().rst(source)), cell.ids())
    return rhs
###############################################################################
# Now we create a suitable mesh and solve the equation with `pg.solve`.
# Note that we use a mesh with quadratic shape functions by calling `createP2`.
#
# Regular grid over [-10, 10] x [-15, 0] m, refined to quadratic (P2)
# shape functions.
mesh = pg.createGrid(x=np.linspace(-10.0, 10.0, 41),
                     y=np.linspace(-15.0, 0.0, 31))
mesh = mesh.createP2()
# Buried pole electrodes A (+) and B (-), wave number and conductivity.
sourcePosA = [-5.25, -3.75]
sourcePosB = [+5.25, -3.75]
k = 1e-2
sigma = 1.0
# Mixed (Robin) condition on all non-surface boundary markers.
bc={'Robin': {'1,2,3': mixedBC}}
# Solve for electrode A, then superpose the negative field of electrode B.
# Fix: the userData keyword on the second call was corrupted by an
# extraction artifact (``userDat | a``).
u = pg.solve(mesh, a=sigma, b=-sigma * k*k,
             rhs=rhsPointSource(mesh, sourcePosA),
             bc=bc, userData={'sourcePos': sourcePosA, 'k': k, 's': sigma},
             verbose=True)
u -= pg.solve(mesh, a=sigma, b=-sigma * k*k,
              rhs=rhsPointSource(mesh, sourcePosB),
              bc=bc, userData={'sourcePos': sourcePosB, 'k': k, 's': sigma},
              verbose=True)
# The solution is shown by calling |
# Plot the potential field on the mesh.
ax = pg.show(mesh, data=u, cMap="RdBu_r", cMin=-1, cMax=1,
             orientation='horizontal', label='Potential $u$',
             nCols=16, nLevs=9, showMesh=True)[0]
# Additionally to the image of the potential we want to see the current flow.
# The current flows along the gradient of our solution and can be plotted as
# stream lines. By default, the drawStreams method draws one segment of a
# stream line per cell of the mesh. This can be a little confusing for dense
# meshes so we can give a second (coarse) mesh as a new cell base to draw the
# streams. If `drawStreams` gets scalar data, the gradients will be calculated.
gridCoarse = pg.createGrid(x=np.linspace(-10.0, 10.0, 20),
                           y=np.linspace(-15.0, .0, 20))
drawStreams(ax, mesh, u, coarseMesh=gridCoarse, color='Black')
###############################################################################
# We know the exact solution so we can compare it to the numerical results.
# Unfortunately, the point source singularity does not allow a good integration
# measure for the accuracy of the resulting field so we just look for the
# differences.
#
# Evaluate the analytical dipole solution at every mesh position.
uAna = pg.Vector(list(map(lambda p__: uAnalytical(p__, sourcePosA, k, sigma),
                          mesh.positions())))
uAna -= pg.Vector(list(map(lambda p__: uAnalytical(p__, sourcePosB, k, sigma),
                           mesh.positions())))
# Show the absolute nodal difference on a log color scale.
ax = pg.show(mesh, data=pg.abs(uAna-u), cMap="Reds",
             orientation='horizontal', label='|$u_{exact}$ -$u$|',
             logScale=True, cMin=1e-7, cMax=1e-1,
             contourLines=False,
             nCols=12, nLevs=7,
             showMesh=True)[0]
#print('l2:', pg.pf(pg.solver.normL2(uAna-u)))
print('L2:', pg.pf(pg.solver.normL2(uAna-u, mesh)))
print('H1:', pg.pf(pg.solver.normH1(uAna-u, mesh)))
# Regression guard: the L2 error of this example is known.
np.testing.assert_approx_equal(pg.solver.normL2(uAna-u, mesh),
                               0.02415, significant=3)
sassoftware/mint | mint/django_rest/rbuilder/platforms/image_type_descriptors/rawFsImage.py | Python | apache-2.0 | 3,614 | 0.000277 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
XML = """<?xml version='1.0' encoding='UTF-8'?>
<descriptor xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.rpath.com/permanent/descriptor-1.0.xsd" xsi:schemaLocation="http://www.rpath.com/permanent/descriptor-1.0.xsd descriptor-1.0.xsd">
<metadata>
<displayName>Mountable Filesystem Image Configuration</displayName>
<descriptions>
<desc>Mountable Filesystem Image Configuration</desc>
</descriptions>
</metadata>
<dataFields>
<field>
<name>displayName</name>
<descriptions>
<desc>Image name</desc>
</descriptions>
<help href="@Help_image_name@"/>
<type>str</type>
<required>true</required>
</field>
<field>
<name>options.baseFileName</name>
<descriptions>
<desc>Image filename</desc>
</descriptions>
<help href="@Help_image_filename@"/>
<type>str</type>
<required>false</required>
</field>
<field>
<name>options.installLabelPath</name>
<descriptions>
<desc>Conary installLabelPath</desc>
</descriptions>
<help href="@Help_conary_installlabelpath@"/>
<type>str</type>
<required>false</required>
</field>
<field>
<name>options.freespace</name>
<descriptions>
<desc>Free space</desc>
</descriptions>
<help href="@Help_image_freespace@"/>
<type>int</type>
<default>256</default>
<constraints>
<range>
<min>16</min>
</range>
</constraints>
<required>false</required>
</field>
<field>
<name>options.swapSize</name>
<descriptions>
<desc>Swap space</desc>
</descriptions>
<help href="@Help_image_swapspace@"/>
<type>int</type>
<default>512</default>
<constraints>
<range>
<min>16</min>
</range>
</constraints>
<required>false</required>
</field>
    <field>
<name>options.autoResolve</name>
<descriptions>
<desc>Autoinstall Dependencies</desc>
</descriptions>
<help href="@Help_resolve_dependencies@"/>
<type>bool</type>
<default>false</default>
<required>false</required>
</field>
<field>
<name>options.buildOVF10</name>
        <descriptions>
<desc>Generate in OVF 1.0?</desc>
</descriptions>
<help href="@Help_build_ovf_1_0@"/>
<type>bool</type>
<default>false</default>
<required>false</required>
</field>
</dataFields>
</descriptor>"""
|
class GameStats():
    """Track statistics that change while the game runs.

    Fix: two statements corrupted by extraction artifacts
    (``| self.ai_settings`` and ``| self.level``) restored.
    """
    def __init__(self, ai_settings):
        self.ai_settings = ai_settings
        # The game starts inactive until the player begins a round.
        self.game_active = False
        # High score is never reset by reset_stats().
        self.high_score = 0
        self.reset_stats()
    def reset_stats(self):
        """Initialize the statistics that reset for every new game."""
        self.ships_left = self.ai_settings.ship_limit
        self.score = 0
        self.level = 1
|
wonderpush/wonderpush-ios-sdk | official-translations.py | Python | apache-2.0 | 5,820 | 0.005326 | #!/usr/bin/python
# vim: set ts=4 sts=4 sw=4 et:
import sys
import subprocess
import re
try:
import biplist
except ImportError:
print 'Please install the `biplist` module:'
print ' sudo easy_install biplist'
sys.exit(1)
try:
import argparse
except:
print 'Cannot find library "argparse"!'
print 'Please do either:'
print ' sudo apt-get install python-argparse'
print ' sudo easy_install argparse'
print ' sudo pip install argparse'
print ' Go to https://pypi.python.org/pypi/argparse'
sys.exit(1)
# Root of the simulator SDK whose localization resources are scanned.
SDK_ROOT = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk'
PRINT_FILENAMES = True
STRIP_FILENAMES = True # remove SDK_ROOT prefix
# Output template: "<TAB>key =<TAB>value".
KEY_VALUE_PATTERN = '\t%s =\t%s'
# Captures (prefix)(language)(".lproj..." suffix) from a resource path.
LANGUAGE_REGEX = re.compile(r'^(.*/)([^/]+)(\.lproj/.*)$')
def stripFilenameIfNeeded(fn):
    """Return fn relative to SDK_ROOT when stripping is enabled."""
    if not STRIP_FILENAMES:
        return fn
    # +1 also drops the path separator that follows the SDK root.
    return fn[len(SDK_ROOT) + 1:]
def extractLanguage(fileName):
    """Pull the language code out of a '<lang>.lproj' path component.

    Returns None when the path contains no .lproj segment.
    """
    match = LANGUAGE_REGEX.match(fileName)
    return match.group(2) if match is not None else None
def yieldAllDicts(pathPattern = None):
    """Yield (filename, dict) for every .strings/.plist file under
    SDK_ROOT, optionally restricted by a `find -path` pattern.

    Python 2 code (print statements). Plists that decode to a bare
    string are wrapped under the placeholder key '____NO____KEY____'.
    """
    cmdline = ['/usr/bin/find', SDK_ROOT, '-type', 'f', '(', '-name', '*.strings', '-o', '-name', '*.plist', ')']
    if pathPattern is not None:
        cmdline.append('-path')
        cmdline.append(pathPattern)
    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if line == '':
            break
        fn = line.rstrip()
        plists = biplist.readPlist(fn)
        if type(plists) is not list:
            plists = [plists]
        for plist in plists:
            if type(plist) is str or type(plist) is unicode:
                plist = { '____NO____KEY____': plist }
            if 'iteritems' not in dir(plist):
                # Not dict-like: report it and keep going.
                print stripFilenameIfNeeded(fn)
                print 'Unexpected type:', type(plist)
                print repr(plist)
                continue
            yield (fn, plist)
def yieldSiblingDicts(siblingFile):
    """Yield (filename, dict) for the same resource in every language
    directory (all '*.lproj' siblings of `siblingFile`)."""
    match = LANGUAGE_REGEX.match(siblingFile)
    if match is None:
        return
    # Replace the language component with a wildcard and rescan.
    pattern = SDK_ROOT + '/' + match.group(1) + '*' + match.group(3)
    for entry in yieldAllDicts(pattern):
        yield entry
def listLanguages(siblingFile):
    """Yield the language code of every sibling localization file."""
    for fileName, _plist in yieldSiblingDicts(siblingFile):
        yield extractLanguage(fileName)
def listAllTranslations(siblingFile, key):
    """Print '<lang>' + KEY_VALUE_PATTERN for each sibling localization
    file that contains `key` (Python 2 print statement)."""
    for fn, plist in yieldSiblingDicts(siblingFile):
        if key in plist:
            print extractLanguage(fn) + KEY_VALUE_PATTERN % (key, plist[key])
def searchInValues(what):
for fn, plist in yieldAllDicts():
fnPrinted = False
for k, v in plist.iteritems():
if type(v) is not str and type(v) is not unicode:
continue
if ((type(what) is str or type(what) is str) and what in v) or (what.match(v) is not None):
if not fnPrinted:
print stripFilenameIfNeeded(fn)
fnPrinted = True
print '\t%s =\t%s' % (k, v)
def searchInKeys(what):
for fn, plist in yieldAllDicts():
fnPrinted = False
for k, v in plist.iteritems():
if type(v) is not str and type(v) is not unicode:
continue
if ((type(what) is str or type(what) is unicode) and what == k) or (what.match(k) is not None):
if not fnPrinted:
print stripFilenameIfNeeded(fn)
fnPrinted = True
print '\t%s =\t%s' % (k, v)
def searchKey(key):
    """Print every plist entry whose key equals `key` exactly.

    NOTE(review): unlike searchInKeys, the type filter here accepts only
    `str` values (unicode values are silently skipped) — confirm whether
    that narrower filter is intentional.
    """
    for fn, plist in yieldAllDicts():
        fnPrinted = False
        for k, v in plist.iteritems():
            if type(v) is not str:
                continue
            if k == key:
                if not fnPrinted:
                    print stripFilenameIfNeeded(fn)
                    fnPrinted = True
                print '\t%s =\t%s' % (k, v)
def parseArgs(argv = None, **kwargs):
    """Build the CLI parser and parse argv (defaults to sys.argv[1:]).

    Extra **kwargs are spliced in as long options: a True value becomes a
    bare `--flag`, any non-bool value becomes `--key=value` (underscores in
    the keyword become dashes).  Returns the argparse namespace.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        conflict_handler='resolve',
        description='iOS translations extractor utility',
        epilog='''\
Finds and extracts translations for the iOS SDK.''')
    parser.add_argument('command', type=str, choices=['values', 'keys', 'key', 'languages', 'translations'], help='Command to execute')
    parser.add_argument('what', type=str, help='Value or file name')
    parser.add_argument('-f', '--file', type=str, dest='file', help='Accessory filename')
    parser.add_argument('-E', '--regex', action='store_true', dest='regex', help='Whether `what` is a regex instead of a regular string')
    if argv is None:
        argv = sys.argv[1:]
    # Copy argv so the caller's list is never mutated.
    argv_toParse = argv[:]
    argv_toParse.extend(['--%s' % key.replace('_', '-') for key, value in kwargs.iteritems() if value == True])
    argv_toParse.extend(['--%s=%s' % (key.replace('_', '-'), value) for key, value in kwargs.iteritems() if type(value) != bool])
    args = parser.parse_args(argv_toParse)
    return args
def main(args):
    """Dispatch the parsed CLI command to the matching search/list helper."""
    if args.regex:
        # Compile once up front; downstream code branches on "has .match".
        args.what = re.compile(args.what)
    if args.command == 'values':
        searchInValues(args.what)
    elif args.command == 'keys':
        searchInKeys(args.what)
    elif args.command == 'key':
        searchKey(args.what)
    elif args.command == 'languages':
        for lang in listLanguages(args.what):
            print lang
    elif args.command == 'translations':
        # `translations` needs both the sibling file (-f) and the key (what).
        if args.file is None:
            raise Exception('Missing --file argument')
        listAllTranslations(args.file, args.what)
    else:
        raise Exception('Unknown command')
if __name__ == '__main__':
    # CLI entry point; exit quietly on Ctrl-C instead of dumping a traceback.
    try:
        main(parseArgs(sys.argv[1:]))
    except KeyboardInterrupt:
        pass
|
StardustGogeta/Physics-2.0 | Physics 2.0.py | Python | mit | 7,824 | 0.008819 | from functools import reduce
from operator import add
from pygame.math import Vector2 as V2
import pygame as pg, os
from src.display.tkinter_windows import create_menu
from src.core import constants
def init_display():
    """Initialize pygame and open the resizable main window.

    Returns (screen surface, window dimensions as a Vector2).
    """
    pg.init()
    info = pg.display.Info()
    # Window is 60% of the desktop width by 75% of its height.
    dims = (int(info.current_w * 0.6), int(info.current_h * 0.75))
    # Ask SDL to center the window; must be set before set_mode().
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pg.display.set_icon(pg.image.load('AtomIcon.png'))
    screen = pg.display.set_mode(dims, pg.RESIZABLE)
    pg.display.set_caption("Physics Simulator 2.0")
    return screen, V2(dims)
def refresh_display(settings_window, screen, bodies, cam):
    """Redraw one frame: background, optional wall border, then each body
    transformed into camera (screen) space."""
    screen.fill(settings_window.bg_color) # comment out this line for a fun time ;)
    if settings_window.walls.get():
        pg.draw.rect(screen, (0, 0, 0), pg.Rect(0, 0, *cam.dims), 3)
    for b in bodies:
        # Calculate coordinates and radius adjusted for camera
        x, y = (b.position - cam.position - cam.dims / 2) * cam.scale + cam.dims / 2
        pg.draw.circle(screen, b.color, (int(x), int(y)), int(b.radius * cam.scale), 0)
        # The radius should be calculated in such a way that the camera can be zoomed indefinitely.
        # Currently, the properties of an object can reach a distinct threshold, after which they become invisible.
    pg.display.update()
def update_windows(settings_window):
    """Pump the Tk windows and read the current simulation settings.

    Returns [G, COR, [time_factor, collision, walls, g_field, gravity_on]];
    zeros are returned when the settings window is dead or mid-teardown.
    """
    arr = [0, 0, [0] * 5]
    if settings_window.alive:
        settings_window.update()
        try:
            arr = [settings_window.gravity_slider.get() / 100,
                   settings_window.COR_slider.get(),
                   [settings_window.time_slider.get() / 100,
                    settings_window.collision.get(),
                    settings_window.walls.get(),
                    settings_window.g_field.get(),
                    settings_window.gravity_on.get()]]
        except Exception:
            # Widgets can vanish while the window is being destroyed; keep
            # the zeroed defaults for this frame.  (Was a bare `except:`,
            # which also swallowed KeyboardInterrupt/SystemExit.)
            pass
    # BUG FIX: iterate over a copy — the old loop removed entries from
    # `properties_windows` while iterating it, skipping the next window.
    for window in list(settings_window.properties_windows):
        if window.alive:
            window.update()
        else:
            settings_window.properties_windows.remove(window)
    return arr
def handle_mouse(*args):
    """Handle one mouse event: left-click opens a body's properties window;
    wheel up/down zooms the camera (clamped to [0.01, 100]) and rescales
    scrolling speed inversely."""
    settings_window, camera, event, bodies, dims, G, COR, scroll = args
    if event.button == 1:
        # Convert the click from screen space into world space.
        pos = camera.position + (pg.mouse.get_pos() - dims / 2) / camera.scale + dims / 2
        for b in bodies:
            if b.click_collision(pos) and b not in [win.body for win in settings_window.properties_windows]:
                if not settings_window.alive: # Respawn the main window if it is dead
                    settings_window.__init__(bodies, camera, dims, [G, COR]) # This still does not fix all errors
                settings_window.properties_windows.append(create_menu("BodyProperties", bodies, camera, dims, len(settings_window.properties_windows), b))
    elif event.button == 4:
        # Wheel up: zoom in.
        camera.scale = min(camera.scale * 1.1, 100)
        scroll.scale /= 1.1
    elif event.button == 5:
        # Wheel down: zoom out.
        camera.scale = max(camera.scale / 1.1, 0.01)
        scroll.scale *= 1.1
def handle_events(*args):
    """Drain the pygame event queue: window resize, key press/release
    (camera and scroll motion), mouse, and quit.

    Returns (done, dims, screen) — dims/screen may be replaced on resize.
    """
    settings_window, camera, scroll, done, dims, screen, bodies, G, COR = args
    for event in pg.event.get():
        if event.type == pg.VIDEORESIZE:
            width, height = event.w, event.h
            dims, screen = V2(width, height), pg.display.set_mode((width, height), pg.RESIZABLE)
        elif event.type == pg.KEYDOWN:
            scroll.key(event.key, 1)
            camera.key_down(event.key)
        elif event.type == pg.KEYUP:
            scroll.key(event.key, 0)
            camera.key_up(event.key)
        elif event.type == pg.MOUSEBUTTONDOWN:
            handle_mouse(settings_window, camera, event, bodies, dims, G, COR, scroll)
        # Any QUIT event anywhere in the batch flags the loop to stop.
        done |= event.type == pg.QUIT
    return done, dims, screen
def handle_bodies(*args):
    """Advance the simulation one step: pairwise collisions and gravity,
    uniform gravitational field, motion integration, scroll offset,
    far-away body culling (every 100 frames), and wall bounces.

    NOTE(review): `bodies.pop(b)` happens inside `enumerate(bodies)`; the
    `break` avoids using stale inner indices, but the outer loop will then
    skip one element — confirm this is acceptable for merge frames.
    """
    G, COR, time_factor, collision, walls, g_field, gravity, scroll, bodies, camera, dims, frame_count, settings_window = args
    for body in bodies: # Reset previous calculations
        body.acceleration = V2(0, 0)
    for b, body in enumerate(bodies): # Calculate forces and set acceleration, if mutual gravitation is enabled
        # Inner loop runs backwards over the pairs (o > b) so each pair is
        # visited once per frame.
        for o in range( len(bodies)-1, b, -1):
            if collision and bodies[o].test_collision(body):
                if not COR: # Only remove second body if collision is perfectly inelastic
                    bodies[o].merge(bodies[b], settings_window.properties_windows)
                    bodies.pop(b)
                    break
                bodies[o].collide(bodies[b], COR)
            if gravity:
                force = body.force_of(bodies[o], G) # This is a misnomer; `force` is actually acceleration / mass
                # Newton's third law: equal and opposite contributions.
                body.acceleration += bodies[o].mass * force
                bodies[o].acceleration -= body.mass * force
        body.acceleration.y += G / 50 * g_field # Uniform gravitational field
        body.apply_motion(time_factor)
        body.position += scroll.val
        # Cull runaway bodies (checked only every 100th frame for speed),
        # closing any properties window attached to the removed body.
        if not frame_count % 100 and body.position.length() > 100000: # TODO: find a good value from this boundary
            bodies.remove(body)
            for window in settings_window.properties_windows:
                if window.body is body:
                    settings_window.properties_windows.remove(window)
                    window.destroy()
                    break
        if walls: # Wall collision
            # d = body center in screen space, r = on-screen radius.
            d, r = ((body.position - camera.position) - dims / 2) * camera.scale + dims / 2, body.radius * camera.scale
            for i in 0, 1:
                x = d[i] # x is the dimension (x,y) currently being tested / edited
                if x <= r or x >= dims[i] - r:
                    body.velocity[i] *= -COR # Reflect the perpendicular velocity
                    body.position[i] = (2*(x<r)-1) * (r-dims[i]/2) / camera.scale + dims[i] / 2 + camera.position[i] # Place body back into frame
class Scroll:
    """WASD world panning: tracks which keys are held and produces a
    smoothed per-frame offset in `val`."""
    def __init__(self):
        # down: held flag per key in `map` (A, W, D, S); val: current offset;
        # scale: pan speed, adjusted inversely to the camera zoom.
        self.down, self.map, self.val, self.scale = [0, 0, 0, 0], [pg.K_a, pg.K_w, pg.K_d, pg.K_s], V2(0, 0), 1
    def key(self, key, down):
        """Record press (down=1) / release (down=0) for a mapped key."""
        if key in self.map:
            self.down[self.map.index(key)] = down
    def update_value(self):
        """Integrate held keys into the offset; *0.95 decays it smoothly."""
        self.val = (self.val + self.scale * (V2(self.down[:2])-self.down[2:])) * .95
class Camera:
    """Viewport into the world: position, zoom (`scale`), and arrow-key motion."""
    def __init__(self, dims):
        self.position, self.velocity, self.dims, self.scale, self.map = V2(0, 0), V2(0, 0), dims, 1, [pg.K_RIGHT, pg.K_LEFT, pg.K_UP, pg.K_DOWN]
    def key_down(self, key):
        """Start panning: 3 world-units/frame (slower when zoomed in); the
        sign flips for LEFT/UP (map indices 1 and 2)."""
        if key in self.map:
            self.velocity = V2((3/self.scale,0) if key in self.map[:2] else (0,3/self.scale)).elementwise() * ((self.map.index(key) not in (1,2)) * 2 - 1)
    def key_up(self, key):
        """Stop motion on the released key's axis, keeping the other axis."""
        if key in self.map:
            self.velocity = self.velocity.elementwise() * ((0,1) if key in self.map[:2] else (1,0))
    def move_to_com(self, bodies):
        """Center the view on the system's center of mass."""
        total_mass = sum(b.mass for b in bodies)
        self.position = reduce(add, (b.position * b.mass for b in bodies)) / total_mass - self.dims / 2
    def move_to_body(self, body):
        """Center the view on a single body."""
        self.position = body.position - self.dims / 2
    def apply_velocity(self):
        self.position += self.velocity
def main():
    """Set up the display, settings window, and state, then run the frame
    loop: tick clock, pump windows, handle events, step physics, redraw."""
    screen, dims = init_display()
    bodies, camera, scroll = [], Camera(dims), Scroll()
    settings_window, clock, done, frame_count = create_menu("Settings", bodies, camera, dims, [constants.G, constants.COR]), pg.time.Clock(), False, 0
    while not done:
        clock.tick(constants.clock_speed)
        frame_count += 1
        camera.apply_velocity()
        G, COR, misc_settings = update_windows(settings_window)
        done, dims, screen = handle_events(settings_window, camera, scroll, done, dims, screen, bodies, G, COR)
        handle_bodies(G, COR, *misc_settings, scroll, bodies, camera, dims, frame_count, settings_window)
        refresh_display(settings_window, screen, bodies, camera)
        scroll.update_value()
    # Shut down pygame first, then tear down the Tk window if still alive.
    pg.quit()
    if settings_window.alive: settings_window.destroy()
if __name__ == "__main__":
    main()
|
NiloFreitas/Deep-Reinforcement-Learning | reinforcement/players/player_reinforce_rnn_2.py | Python | mit | 4,361 | 0.036918 | from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
    """REINFORCE (policy-gradient) player that feeds stacks of NUM_FRAMES
    consecutive observations through its network (recurrent, per the module
    name) and trains once per episode on discounted returns."""
    # __INIT__
    def __init__(self):
        player.__init__(self)
        # Episode buffer of (prev_state, curr_state, action, reward, done).
        self.experiences = deque()
    # CHOOSE NEXT ACTION
    def act(self, state):
        """Return the action chosen for `state` (delegates to calculate)."""
        return self.calculate(state)
    # CALCULATE NETWORK
    def calculate(self, state):
        """Sample an action from the policy over the last NUM_FRAMES states.

        Falls back to a random action until enough history is buffered.
        NOTE(review): `state` itself is unused — the network only sees the
        buffered experiences; confirm that is intentional.
        """
        size = len( self.experiences )
        if size < self.NUM_FRAMES:
            return self.create_random_action()
        # Stack the most recent NUM_FRAMES observations into one input.
        states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
        for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
            states[i] = self.experiences[j][1]
        # Add the batch dimension expected by the network.
        states = np.expand_dims( states, 0 )
        output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
        # Stochastic policy: sample an action index from the output probs.
        action = np.random.choice( np.arange(len(output)), p=output )
        return self.create_action(action)
    # PREPARE NETWORK
    def operations(self):
        """Attach placeholders, policy-gradient cost, optimizer, and
        TensorBoard summaries to the network graph."""
        # Action Placeholders
        self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
        self.brain.addInput( shape = [ None ] , name = 'Target' )
        # Operations
        self.brain.addOperation( function = tb.ops.pgcost,
                                 input = [ 'Output', 'Actions', 'Target' ],
                                 name = 'Cost' )
        # Optimizer
        self.brain.addOperation( function = tb.optims.adam,
                                 input = 'Cost',
                                 learning_rate = self.LEARNING_RATE,
                                 name = 'Optimizer' )
        # TensorBoard
        self.brain.addSummaryScalar( input = 'Cost' )
        self.brain.addSummaryHistogram( input = 'Target' )
        self.brain.addWriter( name = 'Writer' , dir = './' )
        self.brain.addSummary( name = 'Summary' )
        self.brain.initialize()
    # TRAIN NETWORK
    def train(self, prev_state, curr_state, actn, rewd, done, episode):
        """Buffer one transition; on episode end, build frame-stacked
        batches, compute discounted returns, and run one optimizer step."""
        # Store New Experience Until Done
        self.experiences.append((prev_state, curr_state, actn, rewd, done))
        # Number of full NUM_FRAMES windows available in the buffer.
        batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
        # Check for Train
        if done:
            # Select Batch
            batch = self.experiences
            # Separate Batch Data
            prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
            curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
            actions = np.zeros( ( batchsize , self.num_actions ) )
            rewards = np.zeros( ( batchsize ) )
            dones = np.zeros( ( batchsize ) )
            # Select Batches: window i covers experiences [i, i+NUM_FRAMES);
            # action/reward/done come from the window's last transition.
            for i in range( 0 , batchsize ):
                for j in range( 0 , self.NUM_FRAMES ):
                    prev_states[i,j,:,:] = self.experiences[ i + j ][0]
                    curr_states[i,j,:,:] = self.experiences[ i + j ][1]
                actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
                rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
                dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
            # Calculate Discounted Reward (return), iterating backwards.
            running_add = 0
            discounted_r = np.zeros_like(rewards)
            for t in reversed(range(0, len(rewards))):
                if rewards[t] != 0: # pygame_catch specific
                    running_add = 0
                running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
                discounted_r[t] = running_add
            # Optimize Neural Network
            _, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
                                                                    ['Actions', actions ],
                                                                    ['Target', discounted_r ] ] )
            # TensorBoard
            self.brain.write( summary = summary, iter = episode )
            # Reset Batch
            self.experiences = deque()
|
CRImier/pybatchinstall | lists/make_json.py | Python | mit | 1,028 | 0.013619 | import json
import sys
import os
import shlex
import | subprocess
def execute(*args):
    """Run a shell command and return [returncode, output].

    Each element of *args is one piece of a single command line; stderr is
    merged into the returned output.  Returncode 0 means success.
    """
    # BUG FIX: with shell=True a *sequence* argument is wrong — only
    # args[0] would be executed and the remaining items become shell
    # positional parameters.  Join the pieces into the single command
    # string the shell expects, preserving shell features (globs, pipes).
    command = ' '.join(args)
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
        result = [0, output]
    except subprocess.CalledProcessError as e:
        result = [int(e.returncode), e.output]
    return result
def transform(filename):
    """Convert a three-line package-list file into `<filename>.json`.

    Expected input (one item per line):
        line 1: list name
        line 2: description
        line 3: space-separated package names
    Writes {"name", "description", "packages", "pre-install", "post-install"}
    next to the input as `<filename>.json`.
    """
    output = filename + ".json"
    # Use context managers so both files are closed (and the output is
    # flushed) even on error — the old code leaked both handles.
    with open(filename, 'r') as f:
        name = f.readline().strip("\n")
        description = f.readline().strip("\n")
        packages = f.readline().strip("\n")
        #pre_install = shlex.split(f.readline().strip("\n"))
        #post_install = shlex.split(f.readline().strip("\n"))
    json_dict = {"name": name,
                 "description": description,
                 # Filter out empty strings produced by repeated spaces.
                 "packages": [package for package in packages.split(" ") if package],
                 "pre-install": [],
                 "post-install": []}
    with open(output, "w") as out:
        json.dump(json_dict, out)
if __name__ == "__main__":
    # Convert every plain list file in the current directory, skipping
    # Python sources and already-generated JSON outputs.
    for entry in os.listdir("."):
        if not entry.endswith((".py", ".json")):
            transform(entry)
|
nandhp/youtube-dl | youtube_dl/extractor/vimeo.py | Python | unlicense | 33,081 | 0.002542 | # encoding: utf-8
from __future__ import unicode_literals
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
InAdvancePagedList,
int_or_none,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
unified_strdate,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
parse_filesize,
)
class VimeoBaseInfoExtractor(InfoExtractor):
    """Shared login/token plumbing for the Vimeo extractors."""
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False
    _LOGIN_URL = 'https://vimeo.com/log_in'
    def _login(self):
        """Log in with the configured credentials, if any.

        No-op when no username is configured (unless the extractor marks
        login as required, in which case an ExtractorError is raised).
        """
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        # Fetch the login page first to obtain the CSRF token and vuid.
        webpage = self._download_webpage(self._LOGIN_URL, None, False)
        token, vuid = self._extract_xsrft_and_vuid(webpage)
        data = urlencode_postdata({
            'action': 'login',
            'email': username,
            'password': password,
            'service': 'vimeo',
            'token': token,
        })
        login_request = sanitized_Request(self._LOGIN_URL, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Referer', self._LOGIN_URL)
        self._set_vimeo_cookie('vuid', vuid)
        self._download_webpage(login_request, None, False, 'Wrong login info')
    def _extract_xsrft_and_vuid(self, webpage):
        """Pull the CSRF token (xsrft) and the vuid value out of a page."""
        xsrft = self._search_regex(
            r'(?:(?P<q1>["\'])xsrft(?P=q1)\s*:|xsrft\s*[=:])\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
            webpage, 'login token', group='xsrft')
        vuid = self._search_regex(
            r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
            webpage, 'vuid', group='vuid')
        return xsrft, vuid
    def _set_vimeo_cookie(self, name, value):
        """Set a cookie scoped to vimeo.com."""
        self._set_cookie('vimeo.com', name, value)
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:
(?:
www|
(?P<player>player)
)
\.
)?
vimeo(?P<pro>pro)?\.com/
(?!channels/[^/?#]+/?(?:$|[?#])|(?:album|ondemand)/)
(?:.*?/)?
(?:
(?:
play_redirect_hls|
moogaloop\.swf)\?clip_id=
)?
(?:videos?/)?
(?P<id>[0-9]+)
/?(?:[?&].*)?(?:[#].*)?$
'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'upload_date': '20121220',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/user7108434',
| 'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
| },
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/openstreetmapus',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30',
'duration': 1595,
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/theblnbusinessofsoftware',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'upload_date': '20130614',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/user18948128',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026',
},
'params': {
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'note': 'Video is freely available via original URL '
'and protected with password when accessed via http://vimeo.com/75629013',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/atencio',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'upload_date': '20130927',
'duration': 187,
},
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'upload_date': '20131015',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/staff',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_url': 're:https?://(?:www\.)?vimeo\.com/user28849593',
'uploader_id': 'user28849593',
},
},
{
# contains original format
'url': 'https://vimeo.com/33951933',
'md5': '53c688fa95a55bf4b7293d37a89c5c53',
'info_dict': {
'id |
foursquare/commons-old | src/python/twitter/pants/tasks/filedeps.py | Python | apache-2.0 | 1,488 | 0.00336 | # ==================================================================================================
# Copyright 2012 Twitter, Inc.
# ------------------------- | -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "Licen | se");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import itertools
import os
from twitter.pants.targets import TargetWithSources
from twitter.pants.targets.jvm_binary import JvmApp
from twitter.pants.tasks.console_task import ConsoleTask
__author__ = 'Dave Buchfuhrer'
class FileDeps(ConsoleTask):
    """Console task that prints the set of files the given targets depend on."""
    def console_output(self, targets):
        """Collect source files (and JvmApp bundle files) for all targets."""
        files = set()
        for target in targets:
            if isinstance(target, TargetWithSources):
                files.update(target.expand_files(recursive=False))
            if isinstance(target, JvmApp):
                # Bundles map output paths to source files; take the keys.
                for bundle in target.bundles:
                    files.update(bundle.filemap.keys())
        return files
|
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/cluster/dbscan_.py | Python | gpl-2.0 | 12,482 | 0 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
           algorithm='auto', leaf_size=30, p=2, sample_weight=None,
           random_state=None):
    """Perform DBSCAN clustering from vector array or distance matrix.
    Read more in the :ref:`User Guide <dbscan>`.
    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.
    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.
    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.
    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.
    random_state: numpy.RandomState, optional
        Deprecated and ignored as of version 0.16, will be removed in version
        0.18. DBSCAN does not use random initialization.
    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.
    labels : array [n_samples]
        Cluster labels for each point.  Noisy samples are given the label -1.
    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).
    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.
    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    if not eps > 0.0:
        raise ValueError("eps must be positive.")
    if random_state is not None:
        warnings.warn("The parameter random_state is deprecated in 0.16 "
                      "and will be removed in version 0.18. "
                      "DBSCAN is deterministic except for rare border cases.",
                      category=DeprecationWarning)
    X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, its useless information)
    if metric == 'precomputed' and sparse.issparse(X):
        # Sparse precomputed distances: read each row's within-eps neighbors
        # straight out of the CSR structure without densifying.
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates()  # XXX: modifies X's internals in-place
        X_mask = X.data <= eps
        masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
        masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    else:
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric, p=p)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)
    if sample_weight is None:
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        # Weighted density: a sample's "neighbor count" is the sum of its
        # neighbors' weights (its own weight included, see note above).
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])
    # Initially, all samples are noise.
    labels = -np.ones(X.shape[0], dtype=np.intp)
    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    # Expand clusters from the core samples (Cython inner loop); `labels`
    # is filled in-place.
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise dis |
Ohjel/wood-process | jump/animation.py | Python | mit | 750 | 0.004 | import pygame
class Animation:
    """Sprite-sheet animation: `length` frames laid out horizontally on
    `sheet`, advancing one frame every `delay` + 1 draw calls and wrapping
    back to the first frame afterwards."""
    def __init__(self, sheet, seq):
        # seq packs (length, delay, x, y, w, h) for the strip on the sheet.
        self.sheet = sheet
        self.length, self.delay, self.x, self.y, self.w, self.h = seq[:6]
        self.step = 0          # current frame index
        self.tick = 0          # draws since the last frame advance
        self.curX = self.x     # x offset of the current frame on the sheet
    def draw(self, screen, dest):
        """Blit the current frame at `dest`, then advance the animation clock."""
        frame = pygame.Rect(self.curX, self.y, self.w, self.h)
        screen.blit(self.sheet, dest, frame)
        if self.tick < self.delay:
            self.tick += 1
            return
        # Delay elapsed: move to the next frame, wrapping at the end.
        self.tick = 0
        self.step += 1
        if self.step < self.length:
            self.curX += self.w
        else:
            self.step = 0
            self.curX = self.x
|
Featuretools/featuretools | featuretools/feature_base/features_deserializer.py | Python | bsd-3-clause | 5,124 | 0.001561 | import json
import boto3
from featuretools.entityset.deserialize import \
description_to_entityset as deserialize_es
from | featuretools.feature_base.feature_base import (
AggregationFeature,
DirectFeature,
Feature,
FeatureBase,
FeatureOutputSlice,
GroupByTransformFeature,
IdentityFeature,
TransformFeature
)
from featuretools.primitives.utils import PrimitivesDeserializer
from featuretools.utils.gen_utils import (
check_schema_version,
use_s3fs_features,
use_smartopen_features
)
from featuretools.utils.wrangle import _is_s3, _is_url
def load_features(features, profile_name=None):
    """Loads the features from a filepath, S3 path, URL, an open file, or a JSON formatted string.
    Args:
        features (str or :class:`.FileObject`): The location of where features has
            been saved which this must include the name of the file, or a JSON formatted
            string, or a readable file handle where the features have been saved.
        profile_name (str, bool): The AWS profile specified to write to S3. Will default to None and search for AWS credentials.
            Set to False to use an anonymous profile.
    Returns:
        features (list[:class:`.FeatureBase`]): Feature definitions list.
    Note:
        Features saved in one version of Featuretools or python are not guaranteed to work in another.
        After upgrading Featuretools or python, features may need to be generated again.
    Example:
        .. ipython:: python
            :suppress:
            import featuretools as ft
            import os
        .. code-block:: python
            filepath = os.path.join('/Home/features/', 'list.json')
            ft.load_features(filepath)
            f = open(filepath, 'r')
            ft.load_features(f)
            feature_str = f.read()
            ft.load_features(feature_str)
    .. seealso::
        :func:`.save_features`
    """
    # Parse/fetch the saved definitions, then materialize FeatureBase objects.
    return FeaturesDeserializer.load(features, profile_name).to_list()
class FeaturesDeserializer(object):
    """Rebuilds FeatureBase objects from a serialized features dictionary
    (as produced by save_features), including its embedded EntitySet."""
    # Maps the serialized "type" tag to the concrete feature class.
    FEATURE_CLASSES = {
        'AggregationFeature': AggregationFeature,
        'DirectFeature': DirectFeature,
        'Feature': Feature,
        'FeatureBase': FeatureBase,
        'GroupByTransformFeature': GroupByTransformFeature,
        'IdentityFeature': IdentityFeature,
        'TransformFeature': TransformFeature,
        'FeatureOutputSlice': FeatureOutputSlice
    }
    def __init__(self, features_dict):
        """Validate the schema version and deserialize the embedded EntitySet."""
        self.features_dict = features_dict
        self._check_schema_version()
        self.entityset = deserialize_es(features_dict['entityset'])
        self._deserialized_features = {}  # name -> feature
        self._primitives_deserializer = PrimitivesDeserializer()
    @classmethod
    def load(cls, features, profile_name):
        """Build a deserializer from a JSON string, local path, URL, S3 path,
        or an open file handle (tried in that order for strings)."""
        if isinstance(features, str):
            try:
                features_dict = json.loads(features)
            except ValueError:
                # Not inline JSON: treat the string as a location instead.
                if _is_url(features):
                    features_dict = use_smartopen_features(features)
                elif _is_s3(features):
                    # Pick the S3 access path based on profile_name and
                    # whether ambient AWS credentials are available.
                    session = boto3.Session()
                    if isinstance(profile_name, str):
                        transport_params = {'session': boto3.Session(profile_name=profile_name)}
                        features_dict = use_smartopen_features(features, transport_params)
                    elif profile_name is False:
                        features_dict = use_s3fs_features(features)
                    elif session.get_credentials() is not None:
                        features_dict = use_smartopen_features(features)
                    else:
                        features_dict = use_s3fs_features(features)
                else:
                    with open(features, 'r') as f:
                        features_dict = json.load(f)
            return cls(features_dict)
        # Anything non-string is assumed to be a readable file handle.
        return cls(json.load(features))
    def to_list(self):
        """Return the deserialized features in their saved order."""
        feature_names = self.features_dict['feature_list']
        return [self._deserialize_feature(name) for name in feature_names]
    def _deserialize_feature(self, feature_name):
        """Recursively deserialize one feature and its dependencies (memoized)."""
        if feature_name in self._deserialized_features:
            return self._deserialized_features[feature_name]
        feature_dict = self.features_dict['feature_definitions'][feature_name]
        dependencies_list = feature_dict['dependencies']
        # Collect dependencies into a dictionary of name -> feature.
        dependencies = {dependency: self._deserialize_feature(dependency)
                        for dependency in dependencies_list}
        type = feature_dict['type']
        cls = self.FEATURE_CLASSES.get(type)
        if not cls:
            raise RuntimeError('Unrecognized feature type "%s"' % type)
        args = feature_dict['arguments']
        feature = cls.from_dictionary(args, self.entityset, dependencies,
                                      self._primitives_deserializer)
        self._deserialized_features[feature_name] = feature
        return feature
    def _check_schema_version(self):
        """Raise if the saved schema version is incompatible with this code."""
        check_schema_version(self, 'features')
|
condereis/realtime-stock | docs/conf.py | Python | mit | 8,475 | 0.00531 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Sphinx configuration for the Realtime Stock (rtstock) documentation.
#
# Derived from the sphinx-quickstart scaffold; options that were left at
# their Sphinx defaults have been dropped for brevity.

import sys
import os

# Put the project root (parent of docs/) on sys.path so the in-tree
# package is imported and its version numbers are used below.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)

import rtstock

# -- General configuration -------------------------------------------------

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'Realtime Stock'
copyright = u"2016, Rafael Lopes Conde dos Reis"

# Short X.Y version and full release string both track the package version.
version = rtstock.__version__
release = rtstock.__version__

exclude_patterns = ['_build']
pygments_style = 'sphinx'

# -- Options for HTML output -----------------------------------------------

html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'rtstockdoc'

# -- Options for LaTeX output ----------------------------------------------

latex_elements = {
    # papersize / pointsize / preamble left at their defaults.
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'rtstock.tex',
     u'Realtime Stock Documentation',
     u'Rafael Lopes Conde dos Reis', 'manual'),
]

# -- Options for manual page output ----------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'rtstock',
     u'Realtime Stock Documentation',
     [u'Rafael Lopes Conde dos Reis'], 1)
]

# -- Options for Texinfo output --------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'rtstock',
     u'Realtime Stock Documentation',
     u'Rafael Lopes Conde dos Reis',
     'rtstock',
     'One line description of project.',
     'Miscellaneous'),
]
QudevETH/PycQED_py3 | pycqed/measurement/detector_functions.py | Python | mit | 79,958 | 0.000638 | """
Module containing a collection of detector functions used by the
Measurement Control.
"""
import traceback
import numpy as np
from copy import deepcopy
import time
from string import ascii_uppercase
from pycqed.analysis import analysis_toolbox as a_tools
from pycqed.utilities.timer import Timer
from qcodes.instrument.parameter import _BaseParameter
from qcodes.instrument.base import Instrument
from pycqed.utilities.errors import NoProgressError
import logging
log = logging.getLogger(__name__)
class Detector_Function(object):
    '''
    Detector_Function class for MeasurementControl
    '''

    def __init__(self, **kw):
        # Detector name defaults to the concrete subclass name.
        self.name = self.__class__.__name__
        self.set_kw()
        # Default value names/units; subclasses override these.
        self.value_names = ['val A', 'val B']
        self.value_units = ['arb. units', 'arb. units']
        # to be used by MC.get_percdone() and the IntegratingAveragingPollDetector
        self.acq_data_len_scaling = 1
        self.timer = Timer(self.name)
        # The following properties are not implemented in all detector
        # functions (i.e., might be ignored in some detector functions),
        # but are created here to have a common interface.
        self.progress_callback = kw.get('progress_callback', None)
        self.progress_callback_interval = kw.get(
            'progress_callback_interval', 5) # in seconds
        # tells MC whether to show live plotting for the measurement
        self.live_plot_allowed = kw.get('live_plot_allowed', True)

    def set_kw(self, **kw):
        '''
        convert keywords to attributes
        '''
        # NOTE(review): exec() on caller-supplied keys/values; only safe for
        # trusted, code-level keyword arguments.
        for key in list(kw.keys()):
            exec('self.%s = %s' % (key, kw[key]))

    def get_values(self):
        # Acquisition entry point; concrete detectors override this.
        pass

    def prepare(self, **kw):
        # Hook called by MeasurementControl before acquisition starts.
        pass

    def finish(self, **kw):
        # Hook called by MeasurementControl after acquisition ends.
        pass

    def generate_metadata(self):
        """
        Creates a dict det_metadata with all the attributes of itself.
        :return: {'Detector Metadata': det_metadata}
        """
        try:
            # Go through all the attributes of itself, pass them to
            # savable_attribute_value, and store them in det_metadata
            det_metadata = {k: self.savable_attribute_value(v, self.name)
                            for k, v in self.__dict__.items()}
            # Change the 'detectors' entry from a list of dicts to a dict with
            # keys uhfName_detectorName
            detectors_dict = {}
            for d in det_metadata.pop('detectors', []):
                # isinstance(d, dict) only if self was a multi-detector function
                if isinstance(d, dict):
                    # d will never contain the key "detectors" because the
                    # framework currently does not allow to pass an instance of
                    # UHFQC_multi_detector in the "detectors" attribute of
                    # UHFQC_Base since UHFQC_multi_detector does not have the
                    # attribute "UHFQC" (Steph, 23.10.2020)
                    if 'acq_devs' in d:
                        # d["acq_devs"] will always contain one item because of how
                        # savable_attribute_value was written.
                        detectors_dict.update(
                            {f'{d["acq_devs"][0]} {d["name"]}': d})
                    else:
                        detectors_dict.update({f'{d["name"]}': d})
            if len(detectors_dict):
                det_metadata['detectors'] = detectors_dict
            return {'Detector Metadata': det_metadata}
        except Exception:
            # Unhandled errors in metadata creation are not critical for the
            # measurement, so we log them as warnings.
            log.warning(traceback.format_exc())
            return {}

    @staticmethod
    def savable_attribute_value(attr_val, det_name):
        """
        Helper function for converting the attribute of a Detector_Function
        (or its children) to a format that will make the entry more meaningful
        when saved to an hdf file.
        In particular, this function makes sure that if any of the det_func
        attributes are class instances (like det_func.AWG), they are passed to
        the metadata as class_instance.name instead of class_instance, in which
        case it would be saves as a string "<Pulsar: Pulsar>".
        This function also nicely resolves the detectors attribute of the
        detector functions, which would otherwise also be saved as
        ["<pycqed.measurement.detector_functions.UHFQC_classifier_detector
        at 0x22bf280a400>",
        "<pycqed.measurement.detector_functions.UHFQC_classifier_detector
        at 0x22bf280a208>"].
        It parses this list and replaces each instance with its __dict__
        attribute.
        :param attr_val: attribute value of a Detector_Function instance or
            an instance of its children
        :param det_name: name of a Detector_Function instance or an instance
            of its children
        :return: converted attribute value
        """
        if isinstance(attr_val, Detector_Function):
            # Expand nested (multi-)detectors to their own attribute dicts;
            # plain detectors are represented by name only.
            if hasattr(attr_val, 'detectors') and \
                    det_name != attr_val.detectors[0].name:
                return {k: Detector_Function.savable_attribute_value(
                    v, attr_val.name)
                    for k, v in attr_val.__dict__.items()}
            else:
                return attr_val.name
        elif isinstance(attr_val, Instrument):
            try:
                return attr_val.name
            except AttributeError:
                return repr(attr_val)
        elif callable(attr_val):
            return repr(attr_val)
        elif isinstance(attr_val, (list, tuple)):
            # Convert element-wise (this is where detector lists become
            # single-item dict entries).
            return [Detector_Function.savable_attribute_value(av, det_name)
                    for av in attr_val]
        else:
            return attr_val
class Multi_Detector(Detector_Function):
    """
    Combines several detectors of the same type (hard/soft) into a single
    detector.
    """

    def __init__(self, detectors: list,
                 det_idx_suffix: bool=True, **kw):
        """
        detectors (list): a list of detectors to combine.
        det_idx_suffix(bool): if True suffixes the value names with
            "_det{idx}" where idx refers to the relevant detector.
        """
        # NOTE(review): super().__init__() is intentionally not called here
        # (matches historical behavior); attributes normally set by the base
        # class (timer, progress_callback, ...) are therefore absent.
        self.detectors = detectors
        # Fixed garbled source: was `self.name | = 'Multi_detector'`.
        self.name = 'Multi_detector'
        self.value_names = []
        self.value_units = []
        for i, detector in enumerate(detectors):
            for detector_value_name in detector.value_names:
                if det_idx_suffix:
                    detector_value_name += '_det{}'.format(i)
                self.value_names.append(detector_value_name)
            for detector_value_unit in detector.value_units:
                self.value_units.append(detector_value_unit)

        # All combined detectors must share the same control type (hard/soft).
        self.detector_control = self.detectors[0].detector_control
        for d in self.detectors:
            if d.detector_control != self.detector_control:
                raise ValueError('All detectors should be of the same type')

    def prepare(self, **kw):
        """Prepare every child detector."""
        for detector in self.detectors:
            detector.prepare(**kw)

    def get_values(self):
        """Concatenate the values returned by all child detectors."""
        values_list = []
        for detector in self.detectors:
            new_values = detector.get_values()
            values_list.append(new_values)
        values = np.concatenate(values_list)
        return values

    def acquire_data_point(self):
        # N.B. get_values and acquire_data point are virtually identical.
        # the only reason for their existence is a historical distinction
        # between hard and soft detectors that leads to some confusing data
        # shape related problems, hence the append vs concatenate
        values = []
        for detector in self.detectors:
            new_values = detector.acquire_data_point()
            values = np.append(values, new_values)
        return values

    def finish(self):
        """Finish every child detector."""
        for detector in self.detectors:
            detector.finish()
class IndexDetector(Detector_Function):
"""Detector function that indexes the result of another detector function.
Args:
dete |
#!/bin/python3

# Read one token from stdin and print it as an integer; print "Bad String"
# when it does not parse as an int.
S = input().strip()
try:
    print(int(S))
except ValueError:
    print("Bad String")
#Copyright (c) 2011 Yahoo! Inc. All rights reserved. Licensed under the BSD License.
# See accompanying LICENSE file or http://www.opensource.org/licenses/BSD-3-Clause for the specific language governing permissions and limitations under the License.
__all__ = ["publisher"]
|
cripplet/practice | codeforces/489/attempt/a_sort.py | Python | mit | 1,271 | 0.050354 | import fileinput
def str_to_int(s):
	"""Parse a whitespace-separated string of integers into a list."""
	return list(map(int, s.split()))
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
	"""Parse the second input line (the integer list); line 1 is the count.

	Fixed garbled source: was `return str_to_int(args[1] | )`.
	"""
	return str_to_int(args[1])
def find(ints, offset):
	"""Return the index of the first minimum of ints[offset:], or -1 if the
	slice is empty."""
	best_index = -1
	best = float('inf')
	for idx in range(offset, len(ints)):
		if ints[idx] < best:
			best = ints[idx]
			best_index = idx
	return best_index
def swap(l, a, b):
	"""Exchange the elements of l at positions a and b in place."""
	l[a], l[b] = l[b], l[a]
def solve(args, verbose=False, debug=False):
ints = proc_input(args)
if debug:
from copy import deepcopy
d = deepcopy(ints)
results = []
for i in xrang | e(len(ints)):
min_index = find(ints, i)
if min_index != i:
results.append((min_index, i))
swap(ints, min_index, i)
if debug:
d.sort()
assert(ints == d)
assert(len(results) <= len(ints))
if verbose:
print len(results)
for (src, tgt) in results:
print src, tgt
return (len(results), results)
def test():
	# Self-checks for the selection-sort swap counter (run via `... test`).
	assert(str_to_int('1 2 3') == [ 1, 2, 3 ])
	assert(solve([ '5', '5 2 5 1 4' ], debug=True) == ( 2, [ (3, 0), (4, 2) ] ))
	assert(solve([ '6', '10 20 20 40 60 60' ], debug=True) == (0, []))
	assert(solve([ '2', '101 100' ], debug=True) == (1, [ (1, 0) ]))
if __name__ == '__main__':
	from sys import argv
	# 'test' as the last CLI argument runs the self-checks; otherwise the
	# puzzle input is read from stdin/files via fileinput.
	if argv.pop() == 'test':
		test()
	else:
		solve(list(fileinput.input()), verbose=True)
|
WritingTechForJarrod/app | src/wtfj/connectors_local.py | Python | gpl-3.0 | 2,202 | 0.052225 | import time
from threading import Thread
import threading
from wtfj_ids import *
from wtfj_utils import *
class Printer:
	''' Opens a new output window and prints messages sent to it '''

	def __init__(self, header=''):
		# Fixed prefix prepended to every outgoing message.
		self._header = header

	def send(self, string):
		# Emit the header followed by the message on stdout.
		print('%s%s' % (self._header, string))
class Console:
	''' Allows user to enter commands '''
	# NOTE(review): uses raw_input, i.e. this class is Python-2-only.
	def __init__(self,prompt='[$] '):
		self._prompt = prompt
		self._at = ''  # sticky '@uid ' prefix applied to subsequent commands
	def poll(self,wait_s=None,uid=None):
		# Read one command line; returns a single-message list or [].
		try:
			prompt = str(self._at)+str(self._prompt)
			msg = raw_input(prompt)
			if msg == '':
				# Empty line clears the sticky '@uid' target.
				self._at = ''
				return []
			if msg[0] == '@':
				# '@uid ...' sets the target for following commands.
				self._at = msg.split()[0]+' '
			else:
				msg = self._at+msg
			return [msg]
		except Exception as e:
			# Swallow input errors (e.g. EOF) and report them inline.
			print(repr(e))
			return []
	def subscribe(self,*uids):
		# Console does not subscribe to anything; kept for interface parity.
		pass
class Script:
	''' Runs a script passed as a list, default frequency = 1000Hz '''

	def __init__(self, msgs):
		self._msgs = msgs        # messages to replay, in order
		self._index = 0          # index of the next message to emit
		self._period = 0.001     # delay between polls (1 kHz default)
		self._pid = 'SCRIPT'     # label used when logging sends

	def poll(self, wait_s=None, uid=None):
		"""Sleep one period, then return the next message as a one-element
		list, or [] when the script is exhausted.

		Fixed garbled source: was `self._inde | x += 1`.
		"""
		period = self._period if wait_s is None else wait_s
		time.sleep(period)
		try:
			msg = self._msgs[self._index]
			print(self._pid+' SEND > '+msg)
			self._index += 1
			return [msg]
		except IndexError:
			return []

	def subscribe(self, *uids):
		"""Assert every uid (with or without leading '@') is a known Uid;
		returns self for chaining."""
		for uid in uids:
			if uid is not None:
				# Fixed: `uid[0] is '@'` relied on CPython string interning;
				# use equality comparison.
				if uid[0] == '@':
					assert uid[1:] in get_attr(Uid)
				else:
					assert uid in get_attr(Uid)
		return self

	def load(self, msg_array):
		"""Append more messages; returns self for chaining."""
		self._msgs += msg_array
		return self

	def set_period(self, period):
		"""Set the inter-message delay in seconds; returns self."""
		self._period = period
		return self

	def run(self):
		"""Replay all remaining messages synchronously.

		Fixed garbled source: was `while len | (self.poll()) > 0: pass`.
		"""
		t = threading.current_thread()
		self._pid = str(t.ident)+' '+str(t.name)
		while len(self.poll()) > 0:
			pass

	def run_async(self):
		"""Replay the script in a background thread."""
		Thread(target=self.run).start()
if __name__ == '__main__':
	# Demo: print one message, then replay three scripts at different rates.
	Printer('A simple printer: ').send('Just printing a msg to current_thread')
	script = [
		'@other_uid topic data',
		'@other_uid topic',
		'uid topic data',
		'uid topic'
	]
	# NOTE(review): 'async' became a reserved keyword in Python 3.7; this
	# demo only runs on older interpreters.
	async = ['async topic '+str(n) for n in [1,2,3,4,5,6,7,8,9,0]]
	async2 = ['async2 topic '+str(n) for n in [1,2,3,4,5,6,7,8,9,0]]
	Script(script).set_period(1).run()
	Script(async).set_period(0.15).run_async()
	Script(async2).set_period(0.2).run_async()
import rospy
import time

from collections import deque
class Publisher(object):
    """Queues (alias, message) pairs and flushes them to registered
    publishers (anything exposing a .publish(msg) method)."""

    def __init__(self):
        self.publishers = {}   # alias -> publisher object
        self.queue = deque()   # pending (alias, msg) pairs, FIFO

    def add_publisher(self, alias, publisher):
        """Register `publisher` under `alias`.

        Fixed garbled source: was `def add_publi | sher(...)`.
        """
        self.publishers[alias] = publisher

    def publish(self):
        """Drain the queue, publishing each message on its aliased publisher."""
        while len(self.queue) > 0:
            alias, msg = self.queue.popleft()
            # print(...) with a single argument behaves identically on
            # Python 2 and 3 (original used a py2 print statement).
            print("publishing " + alias + ":" + str(msg))
            self.publishers[alias].publish(msg)

    def append(self, alias, msg):
        """Queue `msg` for later publication under `alias`."""
        self.queue.append((alias, msg))
|
dkopecek/amplify | third-party/quex-0.65.2/quex/engine/analyzer/mega_state/template/gain_entry.py | Python | gpl-2.0 | 865 | 0.001156 |
def do(A, B):
    """Computes 'gain' with respect to entry actions, if two states are
    combined.

    Gain = (unique command lists of A) + (unique command lists of B)
         - (unique command lists of the combined state).
    Fixed garbled source (`A_uniq | ue_cl_set`, `Combined_cl_se | t`) and
    replaced py2-only `.itervalues()` with `.values()` (works on both
    Python 2 and 3, identical semantics here).
    """
    # Every different command list requires a separate door.
    # => Entry cost is proportional to number of unique command lists.
    # => Gain = number of unique command lists of A an B each
    #    - number of unique command lists of Combined(A, B)
    A_unique_cl_set = set(ta.command_list for ta in A.action_db.values())
    B_unique_cl_set = set(ta.command_list for ta in B.action_db.values())

    # (1) Compute sizes BEFORE setting Combined_cl_set = A_unique_cl_set
    A_size = len(A_unique_cl_set)
    B_size = len(B_unique_cl_set)

    # (2) Compute combined cost
    Combined_cl_set = A_unique_cl_set  # reuse 'A_unique_cl_set'
    Combined_cl_set.update(B_unique_cl_set)

    return A_size + B_size - len(Combined_cl_set)
|
threemeninaboat3247/raipy | raipy/Example.py | Python | mit | 3,116 | 0.022844 | # -*- coding: utf-8 -*-
"""
Created on Sat May 13 21:48:22 2017
@author: Yuki
"""
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QVBoxLayout,QWidget,QHBoxLayout,QTabWidget,QStatusBar,QTextEdit,QApplication,QPushButton,QMenu,QAction
EXAMPLE='Examples' #the folder exists in the same folder with __init__.py and contains samples
class MyAction(QAction):
    """QAction that re-emits its own display text when triggered."""
    actionName = pyqtSignal(str)  # emitted with the action's text

    def __init__(self, *args):
        # Fixed garbled source: was `super().__init__(*a | rgs)`.
        super().__init__(*args)
        self.triggered.connect(self.myEmit)

    def myEmit(self):
        """Relay the trigger as an actionName(text) signal."""
        self.actionName.emit(self.text())
class ExampleMenu(QMenu):
    '''show example programs in Example folder'''
    def __init__(self,*args):
        super().__init__(*args)
        # Locate the bundled Examples directory next to the raipy package.
        # NOTE(review): '\\' path separators make this Windows-only.
        import raipy
        import os
        folder=os.path.dirname(os.path.abspath(raipy.__file__))+'\\'+EXAMPLE
        files=os.listdir(folder)
        # NOTE(review): substring match also accepts e.g. '.pyc' files.
        pyfiles=[file for file in files if ('.py' in file)]
        self.setList(pyfiles)
    def setList(self,files):
        #append file names to itself and connect signals
        for file in files:
            action=MyAction(file,self)
            self.addAction(action)
            action.actionName.connect(self.showExample)
    def setFileManager(self,manager):
        # Store the file manager used when an example is loaded/exported.
        self.manager=manager
    def showExample(self,file):
        # Open the chosen example in a standalone viewer window.
        self.example=ExampleWidget(file,self.manager)
        self.example.show()
class ExampleWidget(QWidget):
    """Read-only viewer for one example file with Load/Export buttons."""
    load=pyqtSignal(str)    # emitted with the example's path on "Load"
    export=pyqtSignal(str)  # emitted with the example's path on "Export"
    def __init__(self,file,manager):
        super().__init__()
        exe=QPushButton('Load')
        export=QPushButton('Export')
        exe.pressed.connect(self.emitLoad)
        export.pressed.connect(self.emitExport)
        # Wire our signals to the file manager's import/copy handlers.
        self.load.connect(manager.importFile)
        self.export.connect(manager.copyFile)
        buttons=QHBoxLayout()
        buttons.addWidget(exe)
        buttons.addWidget(export)
        buttons.addStretch(1)
        self.text=QTextEdit()
        vbox=QVBoxLayout()
        vbox.addLayout(buttons)
        vbox.addWidget(self.text)
        self.setLayout(vbox)
        self.setText(file)
        self.text.setReadOnly(True)
        self.setGeometry(500, 60, 960,900)
    def setText(self,file):
        #show a file in EXAMPLE folder
        # NOTE(review): Windows-only '\\' separators; the opened file handle
        # is never closed.
        import raipy
        import os
        folder=os.path.dirname(os.path.abspath(raipy.__file__))+'\\'+EXAMPLE
        self.path=folder+'\\'+file
        import codecs
        f=codecs.open(self.path,'r','utf-8')
        text=f.read()
        self.text.setText(text)
    def emitLoad(self):
        # Forward the currently shown example's path to importFile.
        self.load.emit(self.path)
    def emitExport(self):
        # Forward the currently shown example's path to copyFile.
        self.export.emit(self.path)
# Main (manual test harness)
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    # NOTE(review): ExampleWidget requires (file, manager) arguments and
    # ExampleList is not defined in this module -- this harness appears
    # stale and will raise at runtime; confirm before use.
    ex=ExampleWidget()
    ex.show()
    import raipy
    import os
    root=os.path.dirname(os.path.abspath(raipy.__file__))
    path=root+'\\Examples\\Demo.py'
    print(path)
    ex.setText(root+'\\Examples\\Demo.py')
    ll=ExampleList()
    print(ll.getExamples())
    sys.exit(app.exec_())
virtool/virtool | virtool/references/tasks.py | Python | mit | 19,832 | 0.001412 | import asyncio
import json
import os
import shutil
from datetime import timedelta
from pathlib import Path
import aiohttp
import arrow
from semver import VersionInfo
import virtool.errors
import virtool.otus.db
import virtool.tasks.pg
from virtool.api.json import CustomEncoder
from virtool.github import create_update_subdocument
from virtool.history.db import patch_to_version
from virtool.history.utils import remove_diff_files
from virtool.http.utils import download_file
from virtool.references.db import (
download_and_parse_release,
fetch_and_update_release,
insert_change,
insert_joined_otu,
update_joined_otu,
)
from virtool.references.utils import check_import_data, load_reference_file
from virtool.tasks.task import Task
from virtool.utils import compress_json_with_gzip, get_temp_dir
class CloneReferenceTask(Task):
    """Task that clones an existing reference's OTUs (at manifest-pinned
    versions) and history into a new reference."""

    task_type = "clone_reference"

    def __init__(self, app, task_id):
        super().__init__(app, task_id)

        self.steps = [self.copy_otus, self.create_history]

    async def copy_otus(self):
        """Copy each manifest OTU, patched to its pinned version, into the
        new reference; records inserted ids in the task context."""
        manifest = self.context["manifest"]
        created_at = self.context["created_at"]
        ref_id = self.context["ref_id"]
        user_id = self.context["user_id"]

        tracker = await self.get_tracker(len(manifest))

        inserted_otu_ids = list()

        await virtool.tasks.pg.update(self.pg, self.id, step="copy_otus")

        for source_otu_id, version in manifest.items():
            _, patched, _ = await patch_to_version(self.app, source_otu_id, version)

            otu_id = await insert_joined_otu(
                self.db, patched, created_at, ref_id, user_id
            )

            inserted_otu_ids.append(otu_id)

            await tracker.add(1)

        await self.update_context({"inserted_otu_ids": inserted_otu_ids})

    async def create_history(self):
        """Write a 'clone' history record for every inserted OTU."""
        user_id = self.context["user_id"]
        inserted_otu_ids = self.context["inserted_otu_ids"]

        tracker = await self.get_tracker(len(inserted_otu_ids))

        await virtool.tasks.pg.update(self.pg, self.id, step="create_history")

        for otu_id in inserted_otu_ids:
            await insert_change(self.app, otu_id, "clone", user_id)
            await tracker.add(1)

    async def cleanup(self):
        """Roll back on failure: delete the reference plus all OTU,
        sequence, history documents and diff files created for it."""
        ref_id = self.context["ref_id"]

        query = {"reference.id": ref_id}

        diff_file_change_ids = await self.db.history.distinct(
            "_id", {**query, "diff": "file"}
        )

        await virtool.tasks.pg.update(self.pg, self.id, step="cleanup")

        await asyncio.gather(
            self.db.references.delete_one({"_id": ref_id}),
            self.db.history.delete_many(query),
            self.db.otus.delete_many(query),
            self.db.sequences.delete_many(query),
            remove_diff_files(self.app, diff_file_change_ids),
        )
class ImportReferenceTask(Task):
    """Task that builds a reference from an uploaded reference file."""

    task_type = "import_reference"

    def __init__(self, app, task_id):
        super().__init__(app, task_id)

        self.steps = [
            self.load_file,
            self.set_metadata,
            self.validate,
            self.import_otus,
            self.create_history,
        ]

        self.import_data = None

    async def load_file(self):
        """Parse the uploaded file at context['path'] into import_data;
        fails the task on JSON or gzip errors."""
        path = self.context["path"]

        tracker = await self.get_tracker()

        try:
            self.import_data = await self.run_in_thread(load_reference_file, path)
        except json.decoder.JSONDecodeError as err:
            return await self.error(str(err).split("JSONDecodeError: ")[1])
        except OSError as err:
            if "Not a gzipped file" in str(err):
                return await self.error("Not a gzipped file")
            else:
                return await self.error(str(err))

        await virtool.tasks.pg.update(
            self.pg, self.id, progress=tracker.step_completed, step="load_file"
        )

    async def set_metadata(self):
        """Copy data_type/organism/targets from the file onto the reference
        document, defaulting to 'genome' and '' when absent."""
        ref_id = self.context["ref_id"]
        tracker = await self.get_tracker()

        try:
            data_type = self.import_data["data_type"]
        except (TypeError, KeyError):
            data_type = "genome"

        try:
            organism = self.import_data["organism"]
        except (TypeError, KeyError):
            organism = ""

        try:
            targets = self.import_data["targets"]
        except (TypeError, KeyError):
            targets = None

        update_dict = {"data_type": data_type, "organism": organism}

        if targets:
            update_dict["targets"] = targets

        await self.db.references.update_one({"_id": ref_id}, {"$set": update_dict})

        await virtool.tasks.pg.update(
            self.pg, self.id, progress=tracker.step_completed, step="set_metadata"
        )

    async def validate(self):
        """Fail the task if the import data does not pass validation.

        Fixed garbled source: the `return await self.error(errors)` line had
        been corrupted.
        """
        tracker = await self.get_tracker()

        errors = check_import_data(self.import_data, strict=False, verify=True)

        if errors:
            return await self.error(errors)

        await virtool.tasks.pg.update(
            self.pg, self.id, progress=tracker.step_completed, step="validate"
        )

    async def import_otus(self):
        """Insert every OTU from the file into the new reference.

        Fixed garbled source: was `self.get_tracker(len(otus | ))`.
        """
        created_at = self.context["created_at"]
        ref_id = self.context["ref_id"]
        user_id = self.context["user_id"]

        otus = self.import_data["otus"]

        tracker = await self.get_tracker(len(otus))

        inserted_otu_ids = list()

        for otu in otus:
            otu_id = await insert_joined_otu(self.db, otu, created_at, ref_id, user_id)
            inserted_otu_ids.append(otu_id)
            await tracker.add(1)

        await self.update_context({"inserted_otu_ids": inserted_otu_ids})

        await virtool.tasks.pg.update(self.pg, self.id, step="import_otus")

    async def create_history(self):
        """Write an 'import' history record for every inserted OTU."""
        inserted_otu_ids = self.context["inserted_otu_ids"]
        user_id = self.context["user_id"]

        tracker = await self.get_tracker(len(inserted_otu_ids))

        for otu_id in inserted_otu_ids:
            await insert_change(self.app, otu_id, "import", user_id)
            await tracker.add(1)

        await virtool.tasks.pg.update(self.pg, self.id, step="create_history")
class RemoteReferenceTask(Task):
task_type = "remote_reference"
def __init__(self, app, task_id):
super().__init__(app, task_id)
self.steps = [self.download, self.create_history, self.update_reference]
self.import_data = None
self.inserted_otu_ids = list()
async def download(self):
tracker = await self.get_tracker(self.context["release"]["size"])
try:
self.import_data = await download_and_parse_release(
self.app, self.context["release"]["download_url"], self.id, tracker.add
)
except (aiohttp.ClientConnectorError, virtool.errors.GitHubError):
return await virtool.tasks.pg.update(
self.pg, self.id, error="Could not download reference data"
)
try:
data_type = self.import_data["data_type"]
except KeyError:
return await virtool.tasks.pg.update(
self.pg, self.id, error="Could not infer data type"
)
await self.db.references.update_one(
{"_id": self.context["ref_id"]},
{
"$set": {
"data_type": data_type,
"organism": self.import_data.get("organism", "Unknown"),
}
},
)
error = check_import_data(self.import_data, strict=True, verify=True)
if error:
return await virtool.tasks.pg.update(self.pg, self.id, error=error)
await virtool.tasks.pg.update(self.pg, self.id, step="import")
async def create_history(self):
otus = self.import_data["otus"]
tracker = await self.get_tracker(len(otus))
for otu in otus:
otu_id = await insert_joined_otu(
self.db,
otu,
self.context["created_at"],
self.context["ref_id"],
self.context["user_id"],
remote=True,
)
self |
jjhuo/btier2 | tools/show_block_details.py | Python | gpl-2.0 | 7,532 | 0.028943 | #!/usr/bin/python
#############################################
# show_block_details.py #
# #
# A simple python program that retrieves #
# btier block placement metadata #
# And optionally stores the data in sqlite #
# EXAMPLE CODE #
#############################################
import os
import sys,errno
import stat
import sqlite3 as lite
import time
import datetime
from stat import *
from sys import argv
RET_OK=0
RET_NODEV=-1
RET_SYSFS=-2
RET_SYSERR=-3
RET_MIGERR=-4
# Move to a higher tier when the block is hit 1.5 times more then avg
THRESHOLD_UP=1.5
# Move to a lower tier when the block is hit 0.5 times less then avg
THRESHOLD_DOWN=0.5
# MAXAGE in seconds
MAXAGE=14400
DB="btier.db"
def print_file(fp):
    """Rewind `fp` and print its full contents to stdout.

    Uses single-argument print(...), which behaves identically on Python 2
    and 3; the original `print fp.read()` statement was Python-2-only.
    """
    fp.seek(0)
    print(fp.read())
def usage():
    """Print command-line usage (program name from argv) and exit.

    Single-argument print(...) works on both Python 2 and 3; the original
    used py2-only print statements.
    """
    print("Usage : %s sdtierX [-sql] [-migrate]" % argv.pop(0))
    print("Please note : -migrate requires -sql")
    exit()
def savestat(device):
    """Return 0 if `device` exists (os.stat succeeds), -1 otherwise.

    The original used a bare `except:`, which also hid programming errors;
    only OSError (missing/inaccessible path) is expected here.
    """
    try:
        os.stat(device)
    except OSError:
        return -1
    return 0
def check_valid_device(device):
    """Exit with RET_NODEV unless `device` exists on the filesystem.

    print(...) with one argument is py2/py3 compatible; the original used a
    py2-only print statement.
    """
    res = savestat(device)
    if res != 0:
        print("Not a valid tier device : %s" % device)
        exit(RET_NODEV)
def read_total_blocks():
    """Return the tier device's total block count from sysfs, or 0 when the
    attribute cannot be read.

    Uses `with open(...)` so the handle is always closed, and catches only
    I/O errors instead of the original bare `except:`.
    """
    blocks = 0
    try:
        with open("/sys/block/" + basename + "/tier/size_in_blocks", "r") as fp:
            blocks = fp.read()
    except (IOError, OSError):
        return int(blocks)
    return int(blocks)
def sql_drop_create(cur):
    """Drop and recreate the `meta` table (one row per btier block) plus an
    index on the device column.

    Catches only sqlite errors; the original bare `except:` hid everything.
    """
    try:
        cur.execute("DROP TABLE IF EXISTS meta")
        #BLOCKNR,DEVICE,OFFSET,ATIME,READCOUNT,WRITECOUNT
        cur.execute("CREATE TABLE meta(blocknr LONG LONG INT PRIMARY KEY, device INT, \
                 offset LONG LONG INT, atime UNSIGNED INT, \
                 readcount UNSIGNED INT, writecount UNSIGNED int)")
        cur.execute("CREATE INDEX meta_device_idx ON meta (device)")
    except lite.Error:
        # Best-effort: the caller proceeds regardless.
        return
def con_open():
    """Open (or create) the metadata database named by module-level DB.

    Fixes two latent NameErrors in the original: `slite.Error` (typo for
    `lite.Error`) and the undefined exit code `DB_ERR` (replaced with the
    module's RET_SYSERR).
    """
    try:
        con = lite.connect(DB)
    except lite.Error as e:
        print("Error %s:" % e.args[0])
        exit(RET_SYSERR)
    else:
        return con
def sql_open(con):
try:
with con:
cur = con.cursor()
sql_drop_create(cur)
except slite.Error, e:
print "Error %s:" % e.args[0]
exit(DB_ERR)
else:
return cur
def read_maxdev():
apipath="/sys/block/"+basename+"/tier/attacheddevices"
try:
fp=open(apipath,"r")
devstr=fp.read()
fp.close()
except:
print "Failed to determine attached devices"
exit(RET_MIGERR)
else:
return int(devstr)
def migrate_block(blocknr,newdev):
apipath="/sys/block/"+basename+"/tier/migrate_block"
try:
fp=open(apipath,"w")
command=str(blocknr)+ | "/"+str(newdev)
fp.write(command)
fp.close()
except IOError as e:
if e.errno == errno.EAGAIN:
return errno.EAGAIN
print "Failed to migrate block %d to device %d" % (blocknr, newdev)
exit(RET_MIGERR)
else:
return 0
def migrate_up(cur,blocknr,device):
if device == 0:
return -1
cur.e | xecute("SELECT blocknr FROM meta where blocknr = ? \
AND device = ? AND writecount > ? \
* (SELECT AVG(writecount) from meta \
where device = ?)", (blocknr,device, THRESHOLD_UP, device))
record = cur.fetchone()
if record == None:
return -1
blocknr=int(record[0])
newdev = int(device) - 1
while True:
res=migrate_block(blocknr, newdev)
if res != errno.EAGAIN:
break
time.sleep (1/5)
print "Migrated blocknr %d from device %d to device %d high hits" \
% (blocknr, device, newdev)
return 0
def migrate_down(cur,blocknr,device):
cur.execute("SELECT blocknr FROM meta WHERE blocknr = ? \
AND device = ? AND writecount > ? \
* (SELECT AVG(writecount) from meta \
where device = ?)", (blocknr, device, THRESHOLD_DOWN, device))
record = cur.fetchone()
if record == None:
return -1
blocknr=int(record[0])
newdev=device + 1
while True:
res=migrate_block(blocknr, newdev)
if res != errno.EAGAIN:
break
time.sleep (1/5)
print "Migrated blocknr %d from device %d to device %d low hits" \
% (blocknr, device, newdev)
return 0
def migrate_down_age(cur,blocknr,device):
tdev=device + 1
grace=CTIME-(MAXAGE*tdev)
cur.execute("SELECT blocknr FROM meta WHERE blocknr = ? \
AND device = ? AND atime < ? ", (blocknr,device, grace))
record = cur.fetchone()
if record == None:
return -1
blocknr=record[0]
migrate_block(blocknr, device + 1)
print "Migrated blocknr %d from device %d to device %d because of age" \
% (blocknr, device, device+1)
return 0
def do_migration(cur):
maxdev=read_maxdev()
blocknr=0
while blocknr < total_blocks:
cur.execute("SELECT device FROM meta WHERE device != -1 \
AND blocknr = ? ", [ blocknr ] )
record = cur.fetchone()
if record == None:
blocknr+=1
continue
device=int(record[0])
res=-1
res=migrate_up(cur,blocknr,device)
if res != 0:
if device < maxdev - 1:
res=migrate_down(cur,blocknr,device)
if res != 0:
res=migrate_down_age(cur,blocknr,device)
blocknr+=1
def write_sql(cur,blocknr,blockinfo):
binfo=blockinfo.split( ',' )
device=binfo.pop(0)
offset=int(binfo.pop(0))
atime=int(binfo.pop(0))
readcount=int(binfo.pop(0))
writecount=int(binfo.pop(0))
cur.execute("INSERT INTO meta VALUES(?, ?, ?, ?, ?, ?)", \
(blocknr,device,offset,atime,readcount,writecount))
def retrieve_blockinfo(total_blocks):
apipath="/sys/block/"+basename+"/tier/show_blockinfo"
con=con_open()
cur=sql_open(con)
try:
fp=open(apipath,"r+")
curblock=0
while curblock < total_blocks:
fp.write(str(curblock)+"\n")
fp.seek(0)
blockinfo=fp.read()
if sql:
write_sql(cur,curblock,blockinfo)
else:
print str(curblock)+" "+blockinfo,
curblock=curblock + 1
fp.close()
con.commit()
if migrate:
do_migration(cur)
con.close()
except:
return RET_SYSERR
else:
return RET_OK
#############################################
## MAIN starts here ##
#############################################
sql=0
migrate=0
t=datetime.datetime.now()
CTIME=time.mktime(t.timetuple())
argc=len(argv) - 1
if argc < 1:
usage()
basename = argv.pop(1)
# First check specified arguments
if "/dev/" in basename:
usage()
if argc >= 2:
sqls=argv.pop(1)
if "-sql" in sqls:
sql=1
else:
usage()
if argc == 3:
migs=argv.pop(1)
if "-migrate" in migs:
migrate=1
else:
usage()
# Check the number of blocks that make up the
# device
device="/dev/"+basename
check_valid_device(device)
total_blocks=read_total_blocks()
if 0 == total_blocks:
print "Failed to retrieve device size from sysfs"
exit(RET_SYSFS)
retrieve_blockinfo(total_blocks)
exit(RET_OK)
|
simudream/polyglot | polyglot/mapping/expansion.py | Python | gpl-3.0 | 3,238 | 0.011736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import OrderedVocabulary
from collections import defaultdict
from six import iteritems
import re
import logging
logger = logging.getLogger(__name__)
class VocabExpander(OrderedVocabulary):
def __init__(self, vocabulary, formatters, strategy):
super(VocabExpander, self).__init__(vocabulary.words)
self.strategy = strategy
self._vocab = vocabulary
self.aux_word_id = defaultdict(lambda: [])
self.formatters = formatters
self.expand(formatters)
self.aux_id_word = {id_:w for w, id_ in iteritems(self.aux_word_id)}
def __getitem__(self, key):
try:
return self._vocab[key]
except KeyError as e:
try:
return self.aux_word_id[key]
except KeyError as e:
return self.approximate_ids(key)
def __contains__(self, key):
return ((key in self._vocab) or
(key in self.aux_word_id) or
self.approximate(key))
def __len__(self):
return len(self._vocab) + len(self.aux_word_id)
def __delitem__(self):
raise NotImplementedError("It is quite complex, let us do it in the future")
def format(self, w):
return [f(w) for f in self.formatters]
def approximate(self, w):
f = lambda key: (key in self._vocab) or (key in self.aux_word_id)
return {w_:self[w_] for w_ in self.format(w) if f(w_)}
def approximate_ids(self, key):
ids = [id_ for w, id_ in self.approximate(key).items()]
if not ids:
raise KeyError("{} not found".format(key))
else:
if self.strategy == 'most_frequent':
return min(ids)
else:
return tuple(sorted(ids))
def _expand(self, formatter):
for w in self.word_id:
w_ = formatter(w)
if w_ not in self._vocab:
id_ = self.word_id[w]
self.aux_word_id[w_].append(id_)
def expand(self, formatters):
for formatter in formatters:
self._expand(formatter)
if self.strategy == 'average':
self.aux_word_id = {w: tuple(sorted(ids)) for w, ids in iteritems(self.aux_word_id)}
elif self.strategy == 'most_frequent':
self.aux_word_id = {w: min(ids) for w, ids in iteritems(self.aux_word_id)}
else:
raise ValueError("A strategy is needed")
words_added = self.aux_word_id.keys()
old_no = len(self._vocab)
new_no = len(self.aux_word_id)
logger.info("We have {} original words.".format(old_no))
logger.info("Added {} new words.".format(new_no))
logger.info("The new total number of words is {}".format(len(self)))
logger.debug(u"Words added\n{}\n" | .format(u" ".join(words_added)))
class CaseExpander(VocabExpander):
def __init__(self, vocabulary, | strategy='most_frequent'):
formatters = [lambda x: x.lower(),
lambda x: x.title(),
lambda x: x.upper()]
super(CaseExpander, self).__init__(vocabulary=vocabulary, formatters=formatters, strategy=strategy)
class DigitExpander(VocabExpander):
def __init__(self, vocabulary, strategy='most_frequent'):
pattern = re.compile("[0-9]", flags=re.UNICODE)
formatters = [lambda x: pattern.sub("#", x)]
super(DigitExpander, self).__init__(vocabulary=vocabulary, formatters=formatters, strategy=strategy)
|
ExCiteS/geokey-wegovnow | geokey_wegovnow/templatetags/wegovnow.py | Python | mit | 470 | 0 | """Custom WeGovNow template tags."""
from django import template
register = temp | late.Library()
@register.filter()
def exclude_uwum_app(apps):
"""Exclude UWUM app."""
apps_without_uwum = []
| for app in apps:
if app.provider.id != 'uwum':
apps_without_uwum.append(app)
return apps_without_uwum
@register.filter()
def exclude_uwum_accounts(accounts):
"""Exclude UWUM accounts."""
return accounts.exclude(provider='uwum')
|
phlax/translate | translate/storage/jsonl10n.py | Python | gpl-2.0 | 10,135 | 0.000493 | # -*- coding: utf-8 -*-
#
# Copyright 2007,2009-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
r"""Class that manages JSON data files for translation
JSON is an acronym for JavaScript Object Notation, it is an open standard
designed for human-readable data interchange.
JSON basic types:
- Number (integer or real)
- String (double-quoted Unicode with backslash escaping)
- Boolean (true or false)
- Array (an ordered sequence of values, comma-separated and enclosed in square
brackets)
- Object (a collection of key:value pairs, comma-separated and enclosed in
curly braces)
- null
Example:
.. code-block:: json
{
"firstName": "John",
"lastName": "Smith",
"age": 25,
"address": {
"streetAddress": "21 2nd Street",
"city": "New York",
"state": "NY",
"postalCode": "10021"
},
"phoneNumber": [
{
"type": "home",
"number": "212 555-1234"
},
{
"type": "fax",
"number": "646 555-4567"
}
]
}
TODO:
- Handle ``\u`` and other escapes in Unicode
- Manage data type storage and conversion. True --> "True" --> True
"""
import json
import os
from collections import OrderedDict
import six
from translate.storage import base
class JsonUnit(base.TranslationUnit):
"""A JSON entry"""
def __init__(self, source=None, ref=None, item=None, **kwargs):
self._id = None
self._item = str(os.urandom(30))
if item is not None:
self._item = item
self._ref = {}
if ref is not None:
self._ref = ref
if ref is None and item is None:
self._ref[self._item] = ""
if source:
self.source = source
super(JsonUnit, self).__init__(source)
def getsource(self):
return self.target
def setsource(self, source):
self.target = source
source = property(getsource, setsource)
def gettarget(self):
def change_type(value):
if isinstance(value, bool):
return str(value)
return value
if isinstance(self._ref, list):
return change_type(self._ref[self._item])
elif isinstance(self._ref, dict):
return change_type(self._ref[self._item])
def settarget(self, target):
def change_type(oldvalue, newvalue):
if isinstance(oldvalue, bool):
newvalue = bool(newvalue)
return newvalue
if isinstance(self._ref, list):
self._ref[int(self._item)] = change_type(self._ref[int(self._item)],
target)
elif isinstance(self._ref, dict):
self._ref[self._item] = change_type(self._ref[self._item], target)
else:
raise ValueError("We don't know how to handle:\n"
"Type: %s\n"
"Value: %s" % (type(self._ref), target))
target = property(gettarget, settarget)
def setid(self, value):
self._id = value
def getid(self):
return self._id
def getlocations(self):
return [self.getid()]
class JsonFile(base.TranslationStore):
"""A JSON file"""
UnitClass = JsonUnit
def __init__(self, inputfile=None, filter=None, **kwargs):
"""construct a JSON file, optionally reading in from inputfile."""
super(JsonFile, self).__init__(**kwargs)
self._filter = filter
self.filename = ''
self._file = u''
if inputfile is not None:
self.parse(inputfile)
def serialize(self, out):
units = OrderedDict()
for unit in self.unit_iter():
path = unit.getid().lstrip('.')
units[path] = unit.target
out.write(json.dumps(units, separators=(',', ': '),
indent=4, ensure_ascii=False).encode(self.encoding))
out.write(b'\n')
def _extract_translatables(self, data, stop=None, prev="", name_node=None,
name_last_node=None, last_node=None):
"""Recursive function to extract items from the data files
:param data: the current branch to walk down
:param stop: a list of leaves to extract or None to extract everything
:param prev: the hierarchy of the tree at this iteration
:param name_node:
:param name_last_node: the name of the last node
:param last_node: the last list or dict
"""
if isinstance(data, dict):
for k, v in six.iteritems(data):
for x in self._extract_translatables(v, stop,
"%s.%s" % (prev, k),
k, None, data):
yield x
elif isinstance(data, list):
for i, item in enumerate(data):
for x in self._extract_translatables(item, stop,
"%s[%s]" % (prev, i),
i, name_node, data):
yield x
# apply filter
elif (stop is None or
(isinstance(last_node, dict) and name_node in stop) or
(isinstance(last_node, list) and name_last_node in stop)):
if isinstance(data, six.string_types):
yield (prev, data, last_node, name_node)
elif isinstance(data, bool):
yield (prev, str(data), last_node, name_node)
elif data is None:
pass
else:
raise ValueError("We don't handle these values:\n"
"Type: %s\n"
"Data: %s\n"
"Previous: %s" % (type(data), data, prev))
def parse(self, input):
"""parse the given file or file source string"""
if hasattr(input, 'name'):
self.filename = input.name
elif not getattr(self, 'filename', ''):
self.filename = ''
if hasattr(input, "read"):
src = input.read()
input.close()
input = src
if isinstance(input, bytes):
input = input.decode('utf- | 8')
try:
self._file = json.loads(input, object_pairs_hook=OrderedDict)
except ValueError as e:
raise base.ParseError(e.message)
for k, data, ref, item in self._extract_translatables(self._file,
stop=self._filter):
unit = self.UnitClass(data, ref, item)
unit.setid(k)
self.addunit(unit)
class JsonNestedFile(JsonFile):
"""A JSON file with nested keys"""
d | ef serialize(self, out):
def nested_set(target, path, value):
if len(path) > 1:
if path[0] not in target:
target[path[0]] = OrderedDict()
nested_set(target[path[0]], path[1:], value)
else:
target[path[0]] = value
units = OrderedDict()
for unit in self.unit_iter():
path = unit.getid().lstrip('.').split('.')
nested_set(units, path, unit.target)
out.write(json.dumps(units, separators=(',', ': '),
indent=4, ensure_ascii=False).encode(self.encoding))
|
schiz21c/CDListPy | setup_py2exe.py | Python | mit | 1,169 | 0.017964 | from distutils.core import setup
import py2exe, sys
sys.argv.append('py2exe')
MANIFEST = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
langu | age="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
|
setup(
options = {'py2exe': {'packages': ['encodings', 'wx'],
'bundle_files': 1,
'compressed': 1,
'optimize': 2,
'dll_excludes': ['mswsock.dll', 'powrprof.dll']
}},
windows = [{'script': 'CDListPy.py',
'icon_resources': [(1, 'ico/BeOS_BeBox_grey.ico'), (0, 'ico/BeOS_BeBox_grey.ico')],
'dest_base': 'CDListPy',
#'other_resources': [(24, 1, MANIFEST)],
}],
zipfile = None,
)
|
thomashuang/Lilac | lilac/controller/user.py | Python | lgpl-3.0 | 6,114 | 0.009323 | #!/usr/bin/env python
import re
from lilac.controller import ADMIN, LOGGER
from lilac.orm import Backend
from lilac.tool import access, set_secure_cookie
from lilac.model import User
from solo.template import render_template
from solo.web.util import jsonify
from solo.web import ctx
from webob import exc
from lilac.paginator import Paginator
USER_STATUSES = {
'actived': 'actived',
'banned': 'banned',
}
USER = 'user'
ROOT = 'root'
ADMIN = 'administrator'
ROLES = {
# 'root' : 'root',
'administrator': 'administrator',
'user': 'user'
}
def user_menu(m):
ctl = UserController()
# User Api
m.connect('userinfo', '/userinfo', controller=ctl, action='userinfo')
m.connect('login_page', '/login', controller=ctl, action='login_page', conditions=dict(method=["GET"]))
m.connect('login', '/login', controller=ctl, action='login', conditions=dict(method=["POST"]))
m.connect('logout', '/logout', controller=ctl, action='logout')
m.connect('add_user_page', '/user/add', controller=ctl, action='add_page', conditions=dict(method=["GET"]))
m.connect('add_user', '/user/add', controller=ctl, action='add', conditions=dict(method=["POST"]))
m.connect('user_index', '/user', controller=ctl, action='index', conditions=dict(method=["GET"]))
m.connect('edit_user_page', '/user/:uid/edit', controller=ctl, action='edit_page', conditions=dict(method=["GET"]))
m.connect('edit_user', '/user/:uid/edit', controller=ctl, action='edit', conditions=dict(method=["POST"]))
class UserController(object):
@access()
def index(self, page=1):
user = ctx.request.user
if user.role != 'root':
raise exc.HTTPFound(location='/user/%d/edit' % (user.uid))
page = int(page)
users = Backend('user').paginate(page, 10)
return render_template('user.index.html', users=users)
@jsonify
def userinfo(self):
return ctx.request.user
def login_page(self):
if ctx.request.user.uid != 0:
raise exc.HTTPFound('/task')
return render_template('login.html')
def login(self, username='', password=''):
LOGGER.error('username=%s', username)
username = username.strip()
password = password.strip()
user = Backend('user').find_by_username(username)
if user and user.check(password):
set_secure_cookie('auth', str(user.uid))
LOGGER.info('success')
raise exc.HTTPFound(location='/task')
return render_template('login.html')
def logout(self):
if ctx.request.user.uid != 0:
ctx.response.delete_cookie('auth')
raise exc.HTTPFound(location='/login')
@access(ROOT)
def add_page(self):
return render_template('user.add.html', statuses=USER_STATUSES, roles=ROLES)
@jsonify
@access(ROOT)
def add(self, username, email, real_name, password, status='', role='user'):
username, real_name = username.strip(), real_name.strip()
if not re.match(r'^[A-Za-z0-9_]{4,16}$', username):
return {'status' : 'error', 'msg' : 'user name: %s must be the ^[A-Za-z0-9_]{4,16}$ pattern' %(username)}
if not re.match(r'^[A-Za-z0-9_ ]{4,16}$', real_name):
return {'status' : 'error', 'msg' : 'real name: %s must be the [A-Za-z0-9_]{4,16} pattern' %(real_name)}
if not re.match(r'^[A-Za-z0-9@#$%^&+=]{4,16}$', password):
return {'status' : 'error', 'msg' : 'password: %s must be the ^[A-Za-z0-9@#$%^&+=]{4,16}$ pattern' %(password)}
if status not in USER_STATUSES:
status = 'actived'
if role not in ROLES:
role = 'user'
if len(email) > 7 and re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email):
if Backend('user').find_by_email(email):
return {'status' : 'error', 'msg' : 'email:%s is used' %(email)}
if Backend('user').find_by_username(username):
| return {'status' : 'error', 'msg' : 'user name:%s is used' %(username)}
user = User(username, email, real_name, password, status, role)
Backend('user').save(user)
return {'status' : 'info', 'msg' : 'saved'}
@access()
def edit_page(self, uid):
uid = int(uid)
user = Backend('user').find(uid)
if not user:
raise exc.HTTPNotFound('Not Found')
return render_template('user.edit.html', statuses=USER_STATUSES, roles=ROLES | , user=user)
@jsonify
@access()
def edit(self, uid, email, real_name, password, newpass1, newpass2, status, role='user'):
real_name, newpass1, newpass2 = real_name.strip(), newpass1.strip(), newpass2.strip()
uid = int(uid)
user = Backend('user').find(uid)
if not user:
raise exc.HTTPNotFound('user not found')
me = ctx.request.user
if me.uid == user.uid:
if re.match(r'[A-Za-z0-9@#$%^&+=]{4,16}', newpass1):
if password and newpass1 and newpass1 == newpass2:
user.password = newpass1
elif newpass1:
return {'status' : 'error', 'msg' : 'password: %s must be the [A-Za-z0-9_]{4,16} pattern' %(newpass1)}
if len(email) > 7 and re.match("^.+\\@(\\[?)[a-zA-Z0-9\\-\\.]+\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\]?)$", email):
user_ = Backend('user').find_by_email(email)
if user_ and user_.uid != user.uid:
return {'status' : 'error', 'msg' : 'email:%s is used' %(email)}
else:
user.email = email
if me.uid == 1 and user.uid != 1:
if role in (ADMIN, USER):
user.role = role
if user.status != status and status in USER_STATUSES:
user.status = status
if re.match(r'^[A-Za-z0-9_ ]{4,16}$', real_name):
if user.real_name != real_name:
user.real_name = real_name
Backend('user').save(user)
return {'status' : 'info', 'msg' : 'updated'}
|
somebody1234/Charcoal | interpreterprocessor.py | Python | mit | 34,121 | 0.000645 | # TODO: direction list operator?
from direction import Direction, Pivot
from charcoaltoken import CharcoalToken as CT
from unicodegrammars import UnicodeGrammars
from wolfram import (
String, Rule, DelayedRule, Span, Repeated, RepeatedNull, PatternTest,
Number, Expression
)
import re
from math import floor, ceil
def FindAll(haystack, needle):
r = []
if isinstance(haystack, str):
index = haystack.find(needle)
while True:
if ~index:
r += [index]
else:
return r
index = haystack.find(needle, index + 1)
else:
return [i for i, item in (haystack.items() if isinstance(haystack, dict) else enumerate(haystack)) if item == needle]
def ListFind(haystack, needle):
if isinstance(haystack, dict):
for i, item in haystack.items():
if item == needle:
return i
return None
return haystack.index(needle) if needle in haystack else -1
def dedup(iterable):
iterable = iterable[:]
items = []
i = 0
for item in iterable:
if item in items:
del iterable[i]
else:
i += 1
items += [item]
return iterable
def iter_apply(iterable, function):
clone = iterable[:]
clone[:] = [function(item) for item in clone]
return clone
def itersplit(iterable, number):
result = []
while len(iterable):
result += [iterable[:number]]
iterable = iterable[number:]
return result
def negate_str(string):
try:
return float(string) if "." in string else int(string)
except:
return string[::-1]
def abs_str(string):
try:
return abs(float(string) if "." in string else int(string))
except:
return string # ???
def _int(obj):
if isinstance(obj, str) and re.match("\d+\.?\d*$", obj):
return int(float(obj))
return int(obj)
def product(item):
result = 1
for part in item:
result *= part
return result
def Negate(item):
if isinstance(item, int) or isinstance(item, float):
return -item
if isinstance(item, str):
return negate_str(item)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
return String(negate_str(str(item)))
if hasattr(item, "__iter__"):
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Negate)
def Abs(item):
if isinstance(item, int) or isinstance(item, float):
return abs(item)
if isinstance(item, str):
return abs_str(item)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
return String(abs_str(str(item)))
if hasattr(item, "__iter__"):
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Abs)
def Sum(item):
if isinstance(item, float):
item = int(item)
if isinstance(item, int):
result = 0
while item:
result += item % 10
item //= 10
return result
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
if all(c in "0123456789." for c in item) and item.count(".") < 2:
return sum([0 if c == "." else int(c) for c in item | ])
return sum(
float(c) if "." in c else int(c)
for c in re.findall("\d+\.?\d*|\.\d+", item)
)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
| if isinstance(item[0], str):
return "".join(item)
if isinstance(item[0], String):
return "".join(map(str, item))
if isinstance(item[0], list):
return sum(item, [])
return sum(item)
def Product(item):
if isinstance(item, float):
item = int(item)
if isinstance(item, int):
result = 1
while item:
result *= item % 10
item //= 10
return result
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
if all(c in "0123456789." for c in item) and item.count(".") < 2:
return product([0 if c == "." else int(c) for c in item])
return product(
float(c) if "." in c else int(c)
for c in re.findall("\d+\.?\d*|\.\d+", item)
)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
# TODO: cartesian product?
# if isinstance(item[0], list):
# return sum(item, [])
return product(item)
def vectorize(fn, afn=None, cast_string=True):
def vectorized(left, right, c):
if isinstance(left, String):
left = str(left)
if isinstance(right, String):
right = str(right)
if type(left) == Expression:
left = left.run()
if type(right) == Expression:
right = right.run()
left_type = type(left)
right_type = type(right)
left_is_iterable = (
hasattr(left, "__iter__") and not isinstance(left, str)
)
right_is_iterable = (
hasattr(right, "__iter__") and not isinstance(right, str)
)
if left_is_iterable or right_is_iterable:
if left_is_iterable and right_is_iterable:
result = afn(left, right, c) if afn else [
vectorized(l, r, c) for l, r in zip(left, right)
]
else:
result = (
[vectorized(item, right, c) for item in left]
if left_is_iterable else
[vectorized(left, item, c) for item in right]
)
result_type = type(left if left_is_iterable else right)
try:
return result_type(result)
except:
return result_type(result, left if left_is_iterable else right)
if cast_string and left_type == str:
left = (float if "." in left else int)(left)
if cast_string and right_type == str:
right = (float if "." in right else int)(right)
return fn(left, right, c)
return vectorized
def Incremented(item):
if isinstance(item, float) or isinstance(item, int):
return round(item + 1, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Incremented(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Incremented)
def Decremented(item):
if isinstance(item, float) or isinstance(item, int):
return round(item - 1, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Decremented(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item, Decremented)
def Doubled(item):
if isinstance(item, float) or isinstance(item, int):
return round(item * 2, 15)
if isinstance(item, Expression):
item = item.run()
if isinstance(item, String):
item = str(item)
if isinstance(item, str):
item = float(item) if "." in item else int(item)
return Doubled(item)
if hasattr(item, "__iter__") and item:
if isinstance(item[0], Expression):
item = iter_apply(item, lambda o: o.run())
return iter_apply(item |
dandygithub/kodi | addons/context.dandy.kinopoisk.sc/resources/lib/sc_czxto.py | Python | gpl-3.0 | 1,100 | 0.01184 | import sys
import urllib, urllib2
import xbmc
import xbmcgui
import XbmcHelpers
common = XbmcHelpers
from videohosts import tools
URL = "http://czx.to"
HEADERS = {
"Ho | st": "czx.to",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
}
VALUES = {
}
_kp_id_ = ''
def get_content():
vh_title = "czx.to"
list_li = []
response = tools.get_response(URL + '/' + str(_kp_id_) + '/', HEAD | ERS, VALUES, 'GET')
if response:
iframe = common.parseDOM(response, "iframe", ret="src")[0]
title_ = "*T*"
title = "[COLOR=orange][{0}][/COLOR] {1}".format(vh_title, tools.encode(title_))
uri = sys.argv[0] + "?mode=show&url={0}".format(urllib.quote_plus(iframe))
item = xbmcgui.ListItem(title)
list_li.append([uri, item, True])
return list_li
def process(kp_id):
global _kp_id_
_kp_id_ = kp_id
xbmc.log("czx.to:kp_id=" + kp_id)
list_li = []
try:
list_li = get_content()
except:
pass
return list_li
|
overdrive3000/skytools | python/pgq/__init__.py | Python | isc | 980 | 0.002041 | """PgQ framework for Python."""
__pychecker__ = 'no | -miximport'
import pgq.event
import pgq.consumer
import pgq.remoteconsumer
import pgq.producer
import pgq.status
import pgq.cascade
import pgq.cascade.nodeinfo
import pgq.cascade.admin
import pgq.cascade.consumer
import pgq.cascade.worker
from pgq.event import *
from pgq.consumer import *
from pgq.coopcon | sumer import *
from pgq.remoteconsumer import *
from pgq.localconsumer import *
from pgq.producer import *
from pgq.status import *
from pgq.cascade.nodeinfo import *
from pgq.cascade.admin import *
from pgq.cascade.consumer import *
from pgq.cascade.worker import *
__all__ = (
pgq.event.__all__ +
pgq.consumer.__all__ +
pgq.coopconsumer.__all__ +
pgq.remoteconsumer.__all__ +
pgq.localconsumer.__all__ +
pgq.cascade.nodeinfo.__all__ +
pgq.cascade.admin.__all__ +
pgq.cascade.consumer.__all__ +
pgq.cascade.worker.__all__ +
pgq.producer.__all__ +
pgq.status.__all__ )
|
0rpc/zerorpc-crosstests | python/server.py | Python | mit | 1,678 | 0.002982 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2016 François-Xavier Bourlet (bombela+zerorpc@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import logging
import sys
import zerorpc
logging.basicConfig(level=logging.DEBUG)
endpoint = sys.argv[ | 1]
class TestServer(zerorpc.Se | rver):
def echo(self, v):
print("echo {0} <{1}>".format(type(v), v))
return v
def quit(self):
print("exiting...")
self.stop()
server = TestServer()
server.bind(endpoint)
print('Server started', endpoint)
server.run()
|
cuteredcat/aukro | aukro/parser.py | Python | mit | 2,291 | 0.004822 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from lxml.html import fromstring, make_links_absolute, tostring
from urlparse import parse_qs, urlparse
import cookielib, json, importlib, re, urllib, urllib2
RE_URL = re.compile(r"""(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<> | ?«»“”‘’]))""")
class HTTPRedirectHandler(urllib2.HTTPRedirectHandler):
def redirect_request(self, req, fp, code, msg, headers, newurl):
newreq = urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
if newreq is not None: self.redirections.append(newreq.get_full_url())
return newreq
class Parser():
def __init__(self, charset='cp1251', *args, **kwargs):
self.charset = charset
self.cook | ie = cookielib.CookieJar()
self.headers = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36'),
('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')]
def link(self, link):
if isinstance(link, unicode):
link = link.encode('utf-8')
if re.match(RE_URL, link):
return link
return None
def data(self, link):
link = self.link(link)
if link:
return parse_qs(urlparse(link).query, keep_blank_values=True)
else:
return None
def html(self, el):
return tostring(el)
def grab(self, link, tree=True):
try:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
opener.addheaders = self.headers
link = self.link(link)
socket = opener.open(link)
if self.charset is None:
content = socket.read()
else:
content = unicode(socket.read(), self.charset)
socket.close()
except:
content = None
if content and tree:
content = make_links_absolute(fromstring(content), link, resolve_base_href=True)
return content
|
technoarch-softwares/facebook-auth | setup.py | Python | bsd-2-clause | 1,410 | 0.002128 | import os
from setuptools import find_packages, setup
wi | th open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
install_requires = [
'requests==2.8.1'
]
setup(
name='facebook-auth',
version='0.1',
pa | ckages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='A simple Django app for facebook authentcation.',
long_description=README,
url='https://github.com/technoarch-softwares/facebook-auth',
author='Pankul Mittal',
author_email='mittal.pankul@gmail.com',
install_requires = install_requires,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
zip_safe=False,
)
|
murarugeorgec/USB-checking | USB/USB_DBus/usb_dbus_system.py | Python | gpl-3.0 | 1,795 | 0.016713 | #! /usr/bin/env python
# DBus to turn USB on or off (by unbinding the driver)
# The System D-bus
import dbus
import dbus.service
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
from usb_inhibit import USB_inhibit
class USB_Se | rvice_Blocker(dbus.service.Object):
inhibitor_work = False
def __init__(self):
self.usb_monitor = USB_inhibit(True)
bus_name = dbus.service.BusName('org.gnome.USBBlocker', bus=dbus.SystemBus())
dbus.service.Object.__init__(self, bus_name, '/org/gnome/USBBlocker')
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.inhibit', \
in_signature='', out_signature='b | ')
def get_status(self):
return self.inhibitor_work
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.inhibit')
def start(self):
print("Start monitoring Dbus system message")
if not self.inhibitor_work:
self.usb_monitor.start()
self.inhibitor_work = True
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.inhibit')
def stop(self):
print("Stop monitoring Dbus system message")
if self.inhibitor_work:
self.usb_monitor.stop()
self.inhibitor_work = False
@dbus.service.method(dbus_interface='org.gnome.USBBlocker.device',
in_signature='ss', out_signature='b')
def enable_device(self, bus_id, dev_id):
print (bus_id)
print (dev_id)
import time; time.sleep(0.03)
return self.usb_monitor.bind_driver(bus_id, dev_id)
DBusGMainLoop(set_as_default=True)
dbus_service = USB_Service_Blocker()
mainloop = GLib.MainLoop()
try:
mainloop.run()
except KeyboardInterrupt:
print("\nThe MainLoop will close...")
mainloop.quit()
|
suutari/shoop | shuup_tests/front/test_product.py | Python | agpl-3.0 | 1,368 | 0.002193 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.core.urlre | solvers import reverse
from shuup.testing.factories import create_product, get_default_product, get_default_shop
@pytest.mark.django_db
def test_product_page(client):
get_default_shop()
product = get_default_product()
res | ponse = client.get(
reverse('shuup:product', kwargs={
'pk': product.pk,
'slug': product.slug
}
)
)
assert b'no such element' not in response.content, 'All items are not rendered correctly'
# TODO test purchase_multiple and sales_unit.allow_fractions
@pytest.mark.django_db
def test_package_product_page(client):
shop = get_default_shop()
parent = create_product("test-sku-1", shop=shop)
child = create_product("test-sku-2", shop=shop)
parent.make_package({child: 2})
assert parent.is_package_parent()
response = client.get(
reverse('shuup:product', kwargs={
'pk': parent.pk,
'slug': parent.slug
}
)
)
assert b'no such element' not in response.content, 'All items are not rendered correctly'
|
mattaw/SoCFoundationFlow | admin/waf/waf-extensions/SFFerrors.py | Python | apache-2.0 | 77 | 0 |
class Error(Exception):
| def | __init__(self, msg):
self.msg = msg
|
eelanagaraj/IST_project | LSTM_ISTapps.py | Python | mit | 5,130 | 0.020663 | #! /usr/bin/env python
""" time to run LSTM on this bad boy! """
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import cPickle as pkl
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.mod | els import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from text_processing.ISTapps import load_ISTapps
#from ISTapps import load_ISTapps
from sklearn import cross_validation
# different structures to test out
"""
# trial 1: kept memory fau | lting at a certain point
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
optimizers = ['adam', sgd, 'adagrad', 'adadelta', 'rmsprop']
LSTM_ins = [128, 256, 512]
LSTM_outs = [128, 256]
activations = ['sigmoid', 'relu', 'softmax', 'tanh']
loss_functions = ['binary_crossentropy', 'mean_squared_error']
# trial 2: cross validation settings
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
optimizers = ['adam']
LSTM_ins = [256, 512]
LSTM_outs = [128, 256]
activations = ['sigmoid', 'relu']
loss_functions = ['binary_crossentropy']
#trial 3: try different optimizers with other settings constant
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
optimizers = [sgd, 'adagrad', 'adadelta', 'rmsprop', 'adam']
LSTM_ins = [256]
LSTM_outs = [128]
activations = ['sigmoid']
loss_functions = ['binary_crossentropy']
"""
# trial 4: try basically all combos except adadelta
sgd1dec = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
sgd1 = SGD(lr=0.1, momentum=0., decay=0., nesterov=False)
sgd01 = SGD(lr=0.01, momentum=0., decay=0., nesterov=False)
sgd001 = SGD(lr=0.001, momentum=0., decay=0., nesterov=False)
optimizers = ['sgd', sgd1, sgd01, sgd001, sgd1dec, 'adam', 'rmsprop', 'adadelta']
LSTM_in_out = [(128, 128), (128, 256), (256,128)]
activations = ['sigmoid', 'tanh', 'relu', 'softmax']
loss_functions = ['mean_squared_error', 'binary_crossentropy']
max_features=100000
maxlen = 30 # cut texts after this number of words
batch_size = 16
k = 5 # cross-validation
#dataset_size = 15000
#results = {}
max_avg = 0
opt_settings = []
for optimizer in optimizers :
for loss_func in loss_functions :
for activation in activations :
for (LSTM_in, LSTM_out) in LSTM_in_out :
settings = (optimizer, loss_func, activation, LSTM_in, LSTM_out)
print("Loading data...")
(X,y) = load_ISTapps(maxlen, seed=111)
# is there data signal ??! --> shrink dataset
#X = X[:dataset_size]
#y = y[:dataset_size]
print("Settings: ", settings)
# cross validation
kfold_indices = cross_validation.KFold(len(X), n_folds=k)
cv_round = 0
cumulative_acc = [0]*k
for train_indices, test_indices in kfold_indices :
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
print("Cross Validation split ", cv_round)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, LSTM_in))
model.add(LSTM(LSTM_in, LSTM_out)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(LSTM_out, 1))
model.add(Activation(activation))
# try using different optimizers and different optimizer configs
model.compile(loss=loss_func, optimizer=optimizer, class_mode="binary")
print("Train...")
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=5,
validation_split=0.1, show_accuracy=True, verbose=2)
score = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
classes = [int(val > 0.55) for val in model.predict(X_test, batch_size=batch_size)]
#classes = model.predict_classes(X_test, batch_size=batch_size)
acc = np_utils.accuracy(classes, y_test)
print('Test accuracy:', acc)
cumulative_acc[cv_round] = acc
cv_round += 1
# try to conserve some memory cause getting weird memory errors
del X_train
del y_train
del X_test
del y_test
del model
cross_val_acc = sum(cumulative_acc) / k
# keep track of current maximum average and settings
if (max_avg < cross_val_acc) :
max_avg = cross_val_acc
opt_settings = (settings)
with open('/home/enagaraj/cumulative_results.txt', 'a') as f :
print ('\nsettings: ', settings, 'accuracies: ', cumulative_acc, 'avg acc: ', cross_val_acc, file=f)
#results[settings] = (cumulative_acc, cross_val_acc)
print ('Average accuracy: ', cross_val_acc)
# again try to satisfy the memory gods
del X
del y
# calculate best value
#vals = results.values()
#max_avg = 0
#for lst,avg in vals :
# if avg > maxv :
# max_avg = avg
with open('/home/enagaraj/cumulative_results_len30.txt', 'a') as f :
print ('\nmax average: ', max_avg, 'optimal settings: ', opt_settings, file=f)
#print (results) |
jsvine/pdfplumber | pdfplumber/utils.py | Python | mit | 20,406 | 0.000441 | import itertools
from operator import itemgetter
from pdfminer.pdftypes import PDFObjRef
from pdfminer.psparser import PSLiteral
from pdfminer.utils import PDFDocEncoding
DEFAULT_X_TOLERANCE = 3
DEFAULT_Y_TOLERANCE = 3
DEFAULT_X_DENSITY = 7.25
DEFAULT_Y_DENSITY = 13
def cluster_list(xs, tolerance=0):
if tolerance == 0:
return [[x] for x in sorted(xs)]
if len(xs) < 2:
return [[x] for x in sorted(xs)]
groups = []
xs = list(sorted(xs))
current_group = [xs[0]]
last = xs[0]
for x in xs[1:]:
if x <= (last + tolerance):
current_group.append(x)
else:
groups.append(current_group)
current_group = [x]
last = x
groups.append(current_group)
return groups
def make_cluster_dict(values, tolerance):
clusters = cluster_list(set(values), tolerance)
nested_tuples = [
[(val, i) for val in value_cluster] for i, value_cluster in enumerate(clusters)
]
return dict(itertools.chain(*nested_tuples))
def cluster_objects(objs, attr, tolerance):
attr_getter = itemgetter(attr) if isinstance(attr, (str, int)) else attr
objs = to_list(objs)
values = map(attr_getter, objs)
cluster_dict = make_cluster_dict(values, tolerance)
get_0, get_1 = itemgetter(0), itemgetter(1)
cluster_tuples = sorted(
((obj, cluster_dict.get(attr_getter(obj))) for obj in objs), key=get_1
)
grouped = itertools.groupby(cluster_tuples, key=get_1)
return [list(map(get_0, v)) for k, v in grouped]
def decode_text(s):
"""
Decodes a PDFDocEncoding string to Unicode.
Adds py3 compatibility to pdfminer's version.
"""
if type(s) == bytes and s.startswith(b"\xfe\xff"):
return str(s[2:], "utf-16be", "ignore")
ords = (ord(c) if type(c) == str else c for c in s)
return "".join(PDFDocEncoding[o] for o in ords)
def resolve_and_decode(obj):
"""Recursively resolve the metadata values."""
if hasattr(obj, "resolve"):
obj = obj.resolve()
if isinstance(obj, list):
return list(map(resolve_and_decode, obj))
elif isinstance(obj, PSLiteral):
return decode_text(obj.name)
elif isinstance(obj, (str, bytes)):
return decode_text(obj)
elif isinstance(obj, dict):
for k, v in obj.items():
obj[k] = resolve_and_decode(v)
return obj
return obj
def decode_psl_list(_list):
return [
decode_text(value.name) if isinstance(value, PSLiteral) else value
for value in _list
]
def resolve(x):
if type(x) == PDFObjRef:
return x.resolve()
else:
return x
def get_dict_type(d):
if type(d) is not dict:
return None
t = d.get("Type")
if type(t) is PSLiteral:
return decode_text(t.name)
else:
return t
def resolve_all(x):
"""
Recursively resolves the given object and all the internals.
"""
t = type(x)
if t == PDFObjRef:
resolved = x.resolve()
# Avoid infinite recursion
if get_dict_type(resolved) == "Page":
return x
return resolve_all(resolved)
elif t in (list, tuple):
return t(resolve_all(v) for v in x)
elif t == dict:
exceptions = ["Parent"] if get_dict_type(x) == "Annot" else []
return {k: v if k in exceptions else resolve_all(v) for k, v in x.items()}
else:
return x
def is_dataframe(collection):
cls = collection.__class__
name = ".".join([cls.__module__, cls.__name__])
return name == "pandas.core.frame.DataFrame"
def to_list(collection):
if is_dataframe(collection):
return collection.to_dict("records") # pragma: nocover
else:
return list(collection)
def dedupe_chars(chars, tolerance=1):
"""
Removes duplicate chars — those sharing the same text, fontname, size,
and positioning (within `tolerance`) as other characters in the set.
"""
key = itemgetter("fontname", "size", "upright", "text")
pos_key = itemgetter("doctop", "x0")
def yield_unique_chars(chars):
sorted_chars = sorted(chars, key=key)
for grp, grp_chars in itertools.groupby(sorted_chars, key=key):
for y_cluster in cluster_objects(grp_chars, "doctop", tolerance):
for x_cluster in cluster_objects(y_cluster, "x0", tolerance):
yield sorted(x_cluster, key=pos_key)[0]
deduped = yield_unique_chars(chars)
return sorted(deduped, key=chars.index)
def objects_to_rect(objects):
return {
"x0": min(map(itemgetter("x0"), objects)),
"x1": max(map(itemgetter("x1"), objects)),
"top": min(map(itemgetter("top"), objects)),
"bottom": max(map(itemgetter("bottom"), objects)),
}
def objects_to_bbox(objects):
return (
min(map(itemgetter("x0"), objects)),
min(map(itemgetter("top"), objects)),
max(map(itemgetter("x1"), objects)),
max(map(itemgetter("bottom"), objects)),
)
obj_to_bbox = itemgetter("x0", "top", "x1", "bottom")
def bbox_to_rect(bbox):
return {"x0": bbox[0], "top": bbox[1], "x1": bbox[2], "bottom": bbox[3]}
def merge_bboxes(bboxes):
"""
Given a set of bounding boxes, return the smallest bounding box that
contains them all.
"""
return (
min(map(itemgetter(0), bboxes)),
min(map(itemgetter(1), bboxes)),
max(map(itemgetter(2), bboxes)),
max(map(itemgetter(3), bboxes)),
)
DEFAULT_WORD_EXTRACTION_SETTINGS = dict(
x_tolerance=DEFAULT_X_TOLERANCE,
y_tolerance=DEFAULT_Y_TOLERANCE,
keep_blank_chars=False,
use_text_flow=False,
horizontal_ltr=True, # Should words be read left-to-right?
vertical_ttb=True, # Should vertical words be read top-to-bottom?
extra_attrs=[],
)
class WordExtractor:
def __init__(self, **settings):
for s, val in settings.items():
if s not in DEFAULT_WORD_EXTRACTION_SETTINGS:
raise ValueError(f"{s} is not a valid WordExtractor parameter")
setattr(self, s, val)
def merge_chars(self, | ordered_chars):
x0, top, x1, bottom = object | s_to_bbox(ordered_chars)
doctop_adj = ordered_chars[0]["doctop"] - ordered_chars[0]["top"]
upright = ordered_chars[0]["upright"]
direction = 1 if (self.horizontal_ltr if upright else self.vertical_ttb) else -1
word = {
"text": "".join(map(itemgetter("text"), ordered_chars)),
"x0": x0,
"x1": x1,
"top": top,
"doctop": top + doctop_adj,
"bottom": bottom,
"upright": upright,
"direction": direction,
}
for key in self.extra_attrs:
word[key] = ordered_chars[0][key]
return word
def char_begins_new_word(self, current_chars, current_bbox, next_char):
upright = current_chars[0]["upright"]
intraline_tol = self.x_tolerance if upright else self.y_tolerance
interline_tol = self.y_tolerance if upright else self.x_tolerance
word_x0, word_top, word_x1, word_bottom = current_bbox
return (
(next_char["x0"] > word_x1 + intraline_tol)
or (next_char["x1"] < word_x0 - intraline_tol)
or (next_char["top"] > word_bottom + interline_tol)
or (next_char["bottom"] < word_top - interline_tol)
)
def iter_chars_to_words(self, chars):
current_word = []
current_bbox = None
for char in chars:
if not self.keep_blank_chars and char["text"].isspace():
if current_word:
yield current_word
current_word = []
current_bbox = None
elif current_word and self.char_begins_new_word(
current_word, current_bbox, char
):
yield current_word
current_word = [char]
current_bbox = obj_to_bbox(char)
else:
current_word.append(char)
if current_bbox is None:
current_bbox = obj_to_bbox(char)
|
sheagcraig/sal | inventory/migrations/0010_auto_20180911_1154.py | Python | apache-2.0 | 937 | 0 | # Generated by Django 1.10 on 2018-09-11 18:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0009_inventory_inventory_str'),
]
operations = [
migrations.AlterField(
model_name='application',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='inventory',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='inventoryitem',
name='id',
field=models.BigAutoField(primary_key=True, serialize=False),
),
| migrations.AlterField(
model_name='inventoryitem',
name='path',
field | =models.TextField(blank=True, null=True),
),
]
|
glaunay/pyproteins | src/pyproteins/alignment/scoringMatrix.py | Python | gpl-3.0 | 3,051 | 0.016716 | import numpy as np
class SubstitutionMatrix():
def __init__(self):
pass
def __getitem__(self, tup):
y, x = tup
return self.matrix[self.alphabet.index(y)][self.alphabet.index(x)]
class Blosum62(SubstitutionMatrix):
def __init__(self):
SubstitutionMatrix.__init__(self)
self.matrix = np.array
# Matrix made by matblas from blosum62.iij
# * column uses minimum score
# BLOSUM Clustered Scoring Matrix in 1/2 Bit Units
# Blocks Database = /data/blocks_5.0/blocks.dat
# Cluster Percentage: >= 62
# Entropy = 0.6979, Expected = -0.5209
self.alphabet = 'ARNDCQEGHILKMFPSTWYVBZX*'
self.matrix = [
[4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4],
[1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4],
[2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4],
[2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4],
[0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4],
[1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4],
[1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, | -2, 1, 4, -1, -4],
[0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4],
[2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4],
[1, -3, -3, | -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4],
[1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4],
[1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4],
[1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4],
[2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4],
[1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4],
[1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4],
[0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4],
[3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4],
[2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4],
[0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4],
[2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4],
[1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4],
[0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4],
[4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1]
]
|
PythonScanClient/PyScanClient | doc/make_version.py | Python | epl-1.0 | 355 | 0.008451 |
print """
Version Info
============
To obtain version info::
from scan.version import __version__, version_his | tory
print __version__
print version_history
"""
import sys
sys.path.append("..")
f | rom scan.version import __version__, version_history
print "Version history::"
for line in version_history.splitlines():
print (" " + line)
|
catapult-project/catapult | third_party/gsutil/gslib/tests/test_acl.py | Python | bsd-3-clause | 33,748 | 0.003793 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for the acl command."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import re
from gslib.command import CreateOrGetGsutilLogger
from gslib.cs_api_map import ApiSelector
from gslib.storage_url import StorageUrlFromString
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import unittest
from gslib.utils import acl_helper
from gslib.utils.constants import UTF8
from gslib.utils.retry_util import Retry
from gslib.utils.translation_helper import AclTranslation
PUBLIC_READ_JSON_ACL_TEXT = '"entity":"allUsers","role":"READER"'
class TestAclBase(testcase.GsUtilIntegrationTestCase):
"""Integration test case base class for acl command."""
_set_acl_prefix = ['acl', 'set']
_get_acl_prefix = ['acl', 'get']
_set_defacl_prefix = ['defacl', 'set']
_ch_acl_prefix = ['acl', 'ch']
_project_team = 'viewers'
@SkipForS3('Tests use GS ACL model.')
class TestAcl(TestAclBase):
"""Integration tests for acl command."""
def setUp(self):
super(TestAcl, self).setUp()
self.sample_uri = self.CreateBucket()
self.sample_url = StorageUrlFromString(str(self.sample_uri))
self.logger = CreateOrGetGsutilLogger('acl')
# Argument to acl ch -p must be the project number, not a name; create a
# bucket to perform translation.
self._project_number = self.json_api.GetBucket(
self.CreateBucket().bucket_name, fields=['projectNumber']).projectNumber
self._project_test_acl = '%s-%s' % (self._project_team,
self._project_number)
def test_set_invalid_acl_object(self):
"""Ensures that invalid content returns a bad request error."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
inpath = self.CreateTempFile(contents=b'badAcl')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
def test_set_invalid_acl_bucket(self):
"""Ensures that invalid content returns a bad request error."""
bucket_uri = suri(self.CreateBucket())
inpath = self.CreateTempFile(contents=b'badAcl')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
def test_set_xml_acl_json_api_object(self):
"""Ensures XML content returns a bad request error and migration warning."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
inpath = self.CreateTempFile(contents=b'<ValidXml></ValidXml>')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn('XML ACL data provided', stderr)
def test_set_xml_acl_json_api_bucket(self):
"""Ensures XML content returns a bad request error and migration warning."""
bucket_uri = suri(self.CreateBucket())
inpath = self.CreateTempFile(contents=b'<ValidXml></ValidXml>')
stderr = self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn('XML ACL data provided', stderr)
def test_set_valid_acl_object(self):
"""Tests setting a valid ACL on an object."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + ['public-read', obj_uri])
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri])
acl_string3 = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
self.assertNotEqual(acl_string, acl_string2)
self.assertEqual(acl_string, acl_string3)
def test_set_valid_permission_whitespace_object(self):
"""Ensures that whitespace is allowed in role and entity elements."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri],
return_stdout=True)
acl_string = re.sub(r'"role"', r'"role" \n', acl_string)
acl_string = re.sub(r'"entity"', r'\n "entity"', acl_string)
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + [inpath, obj_uri])
def test_set_valid_acl_bucket(self):
"""Ensures that valid canned and XML ACLs work with get/set."""
if self._ServiceAccountCredentialsPresent():
# See comments in _ServiceAccountCredentialsPresent
return unittest.skip('Canned ACLs orphan service account permissions.')
bucket_uri = suri(self.CreateBucket())
acl_string = self.RunGsUtil(self._get_acl_prefix + [bucket_uri],
return_stdout=True)
inpath = self.CreateTempFile(contents=acl_string.encode(UTF8))
self.RunGsUtil(self._set_acl_prefix + ['public-read', bucket_uri])
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [bucket_uri],
return_stdout=True)
self.RunGsUtil(self._set_acl_prefix + [inpath, bucket_uri])
acl_string3 = self.RunGsUtil(self._get_acl_prefix + [bucket_uri],
return_stdout=True)
self.assertNotEqual(acl_string, acl_string2)
self.assertEqual(acl_string, acl_string3)
def test_invalid_canned_acl_object(self):
"""Ensures that an invalid canned ACL returns a CommandException."""
obj_uri = suri(self.CreateObject(contents=b'foo'))
stderr = self.RunGsUtil(self._set_acl_prefix +
['not-a-canned-acl', obj_uri],
return_st | derr=True,
expected_status=1)
self.assertIn('CommandEx | ception', stderr)
self.assertIn('Invalid canned ACL', stderr)
def test_set_valid_def_acl_bucket(self):
"""Ensures that valid default canned and XML ACLs works with get/set."""
bucket_uri = self.CreateBucket()
# Default ACL is project private.
obj_uri1 = suri(self.CreateObject(bucket_uri=bucket_uri, contents=b'foo'))
acl_string = self.RunGsUtil(self._get_acl_prefix + [obj_uri1],
return_stdout=True)
# Change it to authenticated-read.
self.RunGsUtil(
self._set_defacl_prefix +
['authenticated-read', suri(bucket_uri)])
# Default object ACL may take some time to propagate.
@Retry(AssertionError, tries=5, timeout_secs=1)
def _Check1():
obj_uri2 = suri(self.CreateObject(bucket_uri=bucket_uri,
contents=b'foo2'))
acl_string2 = self.RunGsUtil(self._get_acl_prefix + [obj_uri2],
return_stdout=True)
self.asser |
Miserlou/Emo | emo/code.py | Python | mit | 46,810 | 0.000192 | #
# This file is based on emoji (https://github.com/kyokomi/emoji).
#
# The MIT License (MIT)
#
# Copyright (c) 2014 kyokomi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
emojiCodeDict = {
":capricorn:": u"\U00002651",
":end:": u"\U0001f51a",
":no_mobile_phones:": u"\U0001f4f5",
":couple:": u"\U0001f46b",
":snowman:": u"\U000026c4",
":sunrise_over_mountains:": u"\U0001f304",
":suspension_railway:": u"\U0001f69f",
":arrows_counterclockwise:": u"\U0001f504",
":bug:": u"\U0001f41b",
":confused:": u"\U0001f615",
":dress:": u"\U0001f457",
":honeybee:": u"\U0001f41d",
":waning_crescent_moon:": u"\U0001f318",
":balloon:": u"\U0001f388",
":bus:": u"\U0001f68c",
":package:": u"\U0001f4e6",
":pencil2:": u"\U0000270f",
":rage:": u"\U0001f621",
":space_invader:": u"\U0001f47e",
":white_medium_small_square:": u"\U000025fd",
":fast_forward:": u"\U000023e9",
":rice_cracker:": u"\U0001f358",
":incoming_envelope:": u"\U0001f4e8",
":sa:": u"\U0001f202",
":womens:": u"\U0001f6ba",
":arrow_right:": u"\U000027a1",
":construction_worker:": u"\U0001f477",
":notes:": u"\U0001f3b6",
":goat:": u"\U0001f410",
":grey_question:": u"\U00002754",
":lantern:": u"\U0001f3ee",
":rice_scene:": u"\U0001f391",
":running:": u"\U0001f3c3",
":ferris_wheel:": u"\U0001f3a1",
":musical_score:": u"\U0001f3bc",
":sparkle:": u"\U00002747",
":wink:": u"\U0001f609",
":art:": u"\U0001f3a8",
":clock330:": u"\U0001f55e",
":minidisc:": u"\U0001f4bd",
":no_entry_sign:": u"\U0001f6ab",
":wind_chime:": u"\U0001f390",
":cyclone:": u"\U0001f300",
":herb:": u"\U0001f33f",
":leopard:": u"\U0001f406",
":banana:": u"\U0001f34c",
":handbag:": u"\U0001f45c",
":honey_pot:": u"\U0001f36f",
":ok:": u"\U0001f197",
":hearts:": u"\U00002665",
":passport_control:": u"\U0001f6c2",
":moyai:": u"\U0001f5ff",
":smile:": u"\U0001f604",
":tiger2:": u"\U0001f405",
":twisted_rightwards_arrows:": u"\U0001f500",
":children_crossing:": u"\U0001f6b8",
":cow:": u"\U0001f42e",
":point_up:": u"\U0000261d",
":house:": u"\U0001f3e0",
":man_with_turban:": u"\U0001f473",
":mountain_railway:": u"\U0001f69e",
":vibration_mode:": u"\U0001f4f3",
":blowfish:": u"\U0001f421",
":it:": u"\U0001f1ee\U0001f1f9",
":oden:": u"\U0001f362",
":clock3:": u"\U0001f552",
":lollipop:": u"\U0001f36d",
":train:": u"\U0001f68b",
":scissors:": u"\U00002702",
":triangular_ruler:": u"\U0001f4d0",
":wedding:": u"\U0001f492",
":flashlight:": u"\U0001f526",
":secret:": u"\U00003299",
":sushi:": u"\U0001f363",
":blue_car:": u"\U0001f699",
":cd:": u"\U0001f4bf",
":milky_way:": u"\U0001f30c",
":mortar_board:": u"\U0001f393",
":crown:": u"\U0001f451",
":speech_balloon:": u"\U0001f4ac",
":bento:": u"\U0001f371",
":grey_exclamation:": u"\U00002755",
":hotel:": u"\U0001f3e8",
":keycap_ten:": u"\U0001f51f",
":newspaper:": u"\U0001f4f0",
":outbox_tray:": u"\U0001f4e4",
":racehorse:": u"\U0001f40e",
":laughing:": u"\U0001f606",
":black_large_square:": u"\U00002b1b",
":books:": u"\U0001f4da",
":eight_spoked_asterisk:": u"\U00002733",
":heavy_check_mark:": u"\U00002714",
":m:": u"\U000024c2",
":wave:": u"\U0001f44b",
":bicyclist:": u"\U0001f6b4",
":cocktail:": u"\U0001f378",
":european_castle:": u"\U0001f3f0",
":point_down:": u"\U0001f447",
":tokyo_tower:": u"\U0001f5fc",
":battery:": u"\U0001f50b",
":dancer:": u"\U0001f483",
":repeat:": u"\U | 0001f501",
":ru:": u"\U0001f1f7\U0001f1fa",
":new_moon:": u"\U0001f311",
":church:": u"\U000026ea",
":date:": | u"\U0001f4c5",
":earth_americas:": u"\U0001f30e",
":footprints:": u"\U0001f463",
":libra:": u"\U0000264e",
":mountain_cableway:": u"\U0001f6a0",
":small_red_triangle_down:": u"\U0001f53b",
":top:": u"\U0001f51d",
":sunglasses:": u"\U0001f60e",
":abcd:": u"\U0001f521",
":cl:": u"\U0001f191",
":ski:": u"\U0001f3bf",
":book:": u"\U0001f4d6",
":hourglass_flowing_sand:": u"\U000023f3",
":stuck_out_tongue_closed_eyes:": u"\U0001f61d",
":cold_sweat:": u"\U0001f630",
":headphones:": u"\U0001f3a7",
":confetti_ball:": u"\U0001f38a",
":gemini:": u"\U0000264a",
":new:": u"\U0001f195",
":pray:": u"\U0001f64f",
":watch:": u"\U0000231a",
":coffee:": u"\U00002615",
":ghost:": u"\U0001f47b",
":on:": u"\U0001f51b",
":pouch:": u"\U0001f45d",
":taxi:": u"\U0001f695",
":hoc |
eayunstack/neutron | neutron/db/securitygroups_db.py | Python | apache-2.0 | 39,022 | 0.000179 | # Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from neutron_lib.utils import helpers
from neutron_lib.utils import net
from oslo_utils import uuidutils
import six
from sqlalchemy.orm import scoped_session
from neutron._i18n import _
from neutron.common import constants as n_const
from neutron.common import utils
from neutron.db import _model_query as model_query
from neutron.db import _resource_extend as resource_extend
from neutron.db import _utils as db_utils
from neutron.db import api as db_api
from neutron.db.models import securitygroup as sg_models
from neutron.extensions import securitygroup as ext_sg
from neutron.objects import base as base_obj
from neutron.objects import securitygroup as sg_obj
@resource_extend.has_resource_extenders
@registry.has_registry_receivers
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
"""Mixin class to add security group to db_base_plugin_v2."""
__native_bulk_support = True
    def create_security_group_bulk(self, context, security_groups):
        """Bulk-create security groups via the base plugin's generic helper.

        :param context: neutron request context
        :param security_groups: bulk request body (presumably a dict keyed
            by 'security_groups' holding a list of group dicts -- confirm
            against ``_create_bulk`` on the base plugin class)
        :return: list of created security group dicts
        """
        return self._create_bulk('security_group', context,
                                 security_groups)
    def _registry_notify(self, res, event, id=None, exc_cls=None, **kwargs):
        """Fire a callback-registry notification for ``res``/``event``.

        If any subscriber raises ``CallbackFailure`` and ``exc_cls`` was
        supplied, the failure is re-raised as ``exc_cls`` carrying the
        subscriber's reason, which aborts the API request.  Without
        ``exc_cls`` the failure is silently swallowed (callers opt in to
        strictness).
        """
        # NOTE(armax): a callback exception here will prevent the request
        # from being processed. This is a hook point for backend's validation;
        # we raise to propagate the reason for the failure.
        try:
            registry.notify(res, event, self, **kwargs)
        except exceptions.CallbackFailure as e:
            if exc_cls:
                reason = (_('cannot perform %(event)s due to %(reason)s') %
                          {'event': event, 'reason': e})
                raise exc_cls(reason=reason, id=id)
@db_api.retry_if_session_inactive()
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
If default_sg is true that means we are a default security group for
a given tenant if it does not exist.
"""
s = security_group['security_group']
kwargs = {
'context': context,
'security_group': s,
'is_default': default_sg,
}
self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
tenant_id = s['tenant_id']
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
else:
existing_def_sg_id = self._get_default_sg_id(context, tenant_id)
if existing_def_sg_id is not None:
# default already exists, return it
return self.get_security_group(context, existing_def_sg_id)
with db_api.context_manager.writer.using(context):
sg = sg_obj.SecurityGroup(
context, id=s.get('id') or uuidutils.generate_uuid(),
description=s['description'], project_id=tenant_id,
name=s['name'], is_default=default_sg)
sg.create()
for ethertype in ext_sg.sg_supported_ethertypes:
if default_sg:
# Allow intercommunication
ingress_rule = sg_obj.SecurityGroupRule(
context, id=uuidutils.generate_uuid(),
project_id=tenant_id, security_group_id=sg.id,
direction='ingress', ethertype=ethertype,
remote_group_id=sg.id)
ingress_rule.create()
sg.rules.append(ingress_rule)
egress_rule = sg_obj.SecurityGroupRule(
context, id=uuidutils.generate_uuid(),
project_id=tenant_id, security_group_id=sg.id,
| direction='egress', ethertype=ethertype)
egress_rule.create()
sg.rules.append(egress_rule)
sg.obj_reset_changes(['rules'])
# fetch sg from db to load the sg rules with sg model.
# NOTE(yamamoto): Adding rules above bumps the revision
# of the SG. It would add SG object to the session.
# Expunge it to ensure the following get_obj | ect doesn't
# use the instance.
context.session.expunge(model_query.get_by_id(
context, sg_models.SecurityGroup, sg.id))
sg = sg_obj.SecurityGroup.get_object(context, id=sg.id)
secgroup_dict = self._make_security_group_dict(sg)
kwargs['security_group'] = secgroup_dict
self._registry_notify(resources.SECURITY_GROUP,
events.PRECOMMIT_CREATE,
exc_cls=ext_sg.SecurityGroupConflict,
**kwargs)
registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
**kwargs)
return secgroup_dict
    @db_api.retry_if_session_inactive()
    def get_security_groups(self, context, filters=None, fields=None,
                            sorts=None, limit=None,
                            marker=None, page_reverse=False, default_sg=False):
        """List security groups matching ``filters`` as dicts.

        Unless ``default_sg`` is True, first makes sure the tenant's
        default security group exists; the ``default_sg=True`` path exists
        so the default-group creation flow can call this without recursing.
        """
        # If default_sg is True do not call _ensure_default_security_group()
        # so this can be done recursively. Context.tenant_id is checked
        # because all the unit tests do not explicitly set the context on
        # GETS. TODO(arosen) context handling can probably be improved here.
        filters = filters or {}
        if not default_sg and context.tenant_id:
            # A tenant_id filter value is a list; fall back to the caller's
            # own tenant when no explicit filter was given.
            tenant_id = filters.get('tenant_id')
            if tenant_id:
                tenant_id = tenant_id[0]
            else:
                tenant_id = context.tenant_id
            self._ensure_default_security_group(context, tenant_id)
        pager = base_obj.Pager(
            sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse)
        sg_objs = sg_obj.SecurityGroup.get_objects(
            context, _pager=pager, validate_filters=False, **filters)
        return [self._make_security_group_dict(obj, fields) for obj in sg_objs]
    @db_api.retry_if_session_inactive()
    def get_security_groups_count(self, context, filters=None):
        """Return the number of security groups matching ``filters``."""
        filters = filters or {}
        return sg_obj.SecurityGroup.count(
            context, validate_filters=False, **filters)
@db_api.retry_if_session_inactive()
def get_security_group(self, context, id, fields=None, tenant_id=None):
"""Tenant id is given to handle the case when creating a security
group rule on behalf of another use.
"""
if tenant_id:
tmp_context_tenant_id = context.tenant_id
context.tenant_id = tenant_id
try:
with db_api.context_manager.reader.using(context):
ret = self._make_security_group_dict(self._get_security_group(
context, id), fields)
ret['security_group_rules'] = self.get_security_group_rules(
context, {'security_group_id': [id]})
finally:
if tenant_id:
context.tenant_id = tmp_context_tenant_id
|
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/distutils/tests/test_bdist_dumb.py | Python | gpl-2.0 | 2,905 | 0.000688 | """Tests for distutils.command.bdist_dumb."""
import os
import sys
import zipfile
import unittest
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_dumb import bdist_dumb
from distutils.tests import support
SETUP_PY = """\
from distutils.core import setup
import foo
setup(name='foo', version='0.1', py_modules=['foo'],
url='xxx', author='xxx', author_email='xxx')
"""
try:
import zlib
ZLIB_SUPPORT = True
except ImportError:
ZLIB_SUPPORT = False
class BuildDumbTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
    def setUp(self):
        """Snapshot cwd and sys.argv so tests can mutate them freely;
        tearDown restores both."""
        super(BuildDumbTestCase, self).setUp()
        self.old_location = os.getcwd()
        # Keep both the original argv object and a copy of its contents.
        self.old_sys_argv = sys.argv, sys.argv[:]
def tearDown(s | elf):
os.chdir(self.old_location)
| sys.argv = self.old_sys_argv[0]
sys.argv[:] = self.old_sys_argv[1]
super(BuildDumbTestCase, self).tearDown()
    @unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
    def test_simple_built(self):
        """End-to-end bdist_dumb check: build a one-module package as a
        zip and verify the archive name and its member list."""
        # let's create a simple package
        tmp_dir = self.mkdtemp()
        pkg_dir = os.path.join(tmp_dir, 'foo')
        os.mkdir(pkg_dir)
        self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
        self.write_file((pkg_dir, 'foo.py'), '#')
        self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
        self.write_file((pkg_dir, 'README'), '')
        dist = Distribution({'name': 'foo', 'version': '0.1',
                             'py_modules': ['foo'],
                             'url': 'xxx', 'author': 'xxx',
                             'author_email': 'xxx'})
        dist.script_name = 'setup.py'
        os.chdir(pkg_dir)
        sys.argv = ['setup.py']
        cmd = bdist_dumb(dist)
        # so the output is the same no matter
        # what is the platform
        cmd.format = 'zip'
        cmd.ensure_finalized()
        cmd.run()
        # see what we have
        dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
        base = "%s.%s.zip" % (dist.get_fullname(), cmd.plat_name)
        self.assertEqual(dist_created, [base])
        # now let's check what we have in the zip file
        fp = zipfile.ZipFile(os.path.join('dist', base))
        try:
            contents = fp.namelist()
        finally:
            fp.close()
        contents = sorted(filter(None, map(os.path.basename, contents)))
        wanted = ['foo-0.1-py%s.%s.egg-info' % sys.version_info[:2], 'foo.py']
        if not sys.dont_write_bytecode:
            # A compiled module is present only when bytecode writing is on.
            wanted.append('foo.%s.pyc' % sys.implementation.cache_tag)
        self.assertEqual(contents, sorted(wanted))
def test_suite():
    """Aggregate this module's test cases for distutils' test runner."""
    return unittest.makeSuite(BuildDumbTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
cmexiatyp/open_academy | model/wizard.py | Python | apache-2.0 | 611 | 0.006547 | # -*- encoding: utf-8 -*-
from openerp import models | , fields, api
class Wizard(models.TransientModel):
_name = 'openacademy.wizard'
def _default_sessions(self):
return self.env | ['openacademy.session'].browse(self._context.get('active_ids'))
session_ids = fields.Many2many('openacademy.session',
string="Sessions", required=True, default=_default_sessions)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
@api.multi
def subscribe(self):
for session in self.session_ids:
session.attendee_ids |= self.attendee_ids
return {} |
maurossi/deqp | scripts/opengl/gen_es31_wrapper.py | Python | apache-2.0 | 1,342 | 0.022355 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file | except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WIT | HOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
from src_util import *
def commandES31InitStatement (command):
    """Render one 'dst->X \t= src.X;' assignment line for a GL command."""
    member = getFunctionMemberName(command.name)
    return "dst->{0}\t= src.{0};".format(member)
def genES31WrapperFuncs (registry):
    """Generate gluES3PlusWrapperFuncs.inl: one 'dst->X = src.X;' line per
    GLES 3.1 core command, column-aligned via genCommandList."""
    iface = getInterface(registry, api='gles2', version="3.1")
    genCommandList(iface, commandES31InitStatement,
                   directory = OPENGL_DIR,
                   filename = "gluES3PlusWrapperFuncs.inl",
                   align = True)
if __name__ == "__main__":
genES31WrapperFuncs(getGLRegistry())
|
rmacqueen/CS274 | p3/md_macqueen.py | Python | gpl-2.0 | 10,475 | 0.025776 | import argparse
from math import sqrt
import sys
NUM_DIMENSIONS = 3
# Gets the distance between two points
def get_distance(pointA, pointB):
    """Return the Euclidean (L2) distance between two points.

    Keeps the original fail-fast behaviour on mismatched dimensionality:
    a message is printed and the program exits (SystemExit), rather than
    raising ValueError, so existing callers see the same exception type.

    :param pointA: first point as a sequence of coordinates
    :param pointB: second point as a sequence of coordinates
    :return: Euclidean distance as a float
    """
    if len(pointA) != len(pointB):
        # Parenthesized single-argument print is valid and identical under
        # both Python 2 and Python 3 (the bare print statement is not).
        print("Trying to compare points of different dimension. Cannot do that")
        sys.exit()
    return sqrt(sum((b - a) ** 2 for a, b in zip(pointA, pointB)))
class AtomicInteraction:
    """A pairwise spring interaction (bonded or non-bonded) between atoms.

    ``force`` starts at 0 and is filled in later by the energy pass.
    """
    def __init__(self, atom1_id, atom2_id, distance, isBond):
        self.atom1_id = atom1_id
        self.atom2_id = atom2_id
        self.isBond = isBond
        self.ref_distance = distance
        self.force = 0

    def __str__(self):
        pieces = [
            "Atom1 id: %s\t" % self.atom1_id,
            "Atom2 id: %s\t" % self.atom2_id,
            "isBond: %s\n" % self.isBond,
            "ref distance: %s\n" % self.ref_distance,
        ]
        return "".join(pieces)
class Atom:
    """A point particle: position, velocity, force, and bond bookkeeping.

    External atom ids are 1-based; everything stored here is 0-based.
    All vectors are plain 3-element lists.
    """
    def __init__(self, id_num, position_x, position_y, position_z,
                 velocity_x, velocity_y, velocity_z, mass, bonded_atoms):
        # Use zero indexing internally.
        self.id = id_num - 1
        self.position_vec = [position_x, position_y, position_z]
        self.velocity_vec = [velocity_x, velocity_y, velocity_z]
        self.acceleration_vec = [0, 0, 0]
        self.force_vec = [0, 0, 0]
        self.mass = mass
        # Convert the bonded partners' 1-based ids to 0-based as well.
        self.bonded_atoms = [partner - 1 for partner in bonded_atoms]
        self.interactions = []

    def add_interaction(self, interaction):
        self.interactions.append(interaction)

    def get_bonded_atoms(self):
        return self.bonded_atoms

    def get_position(self):
        return self.position_vec

    def get_velocity(self):
        return self.velocity_vec

    def get_acceleration(self):
        return self.acceleration_vec

    def set_new_velocity(self, delta_t):
        # Half-kick: v += 0.5 * a * dt, component-wise.
        self.velocity_vec = [v + 0.5 * a * delta_t
                             for v, a in zip(self.velocity_vec,
                                             self.acceleration_vec)]

    def set_new_position(self, delta_t):
        # Drift: x += v * dt, component-wise.
        self.position_vec = [p + v * delta_t
                             for p, v in zip(self.position_vec,
                                             self.velocity_vec)]

    def set_new_acceleration(self):
        # Newton's second law: a = F / m.
        self.acceleration_vec = [f / self.mass for f in self.force_vec]

    def add_new_force(self, force, other_atom_pos):
        # Project the scalar spring force onto the unit vector pointing
        # from this atom toward the other atom.
        distance = get_distance(self.position_vec, other_atom_pos)
        self.force_vec = [f + force * ((o - p) / distance)
                          for f, o, p in zip(self.force_vec,
                                             other_atom_pos,
                                             self.position_vec)]

    def __str__(self):
        lines = ["id: %s" % self.id,
                 "position: %s" % self.position_vec,
                 "velocity: %s" % self.velocity_vec,
                 "acceleration: %s" % self.acceleration_vec,
                 "force: %s" % self.force_vec,
                 "bonded atoms: %s" % self.bonded_atoms]
        return "\n".join(lines) + "\n"
class MolecularDynamics:
    """Velocity-Verlet propagation of a spring network of Atom objects.

    ``bonds`` holds AtomicInteraction records (bonded and non-bonded
    springs). Each iteration computes spring energies and forces, then
    integrates positions and velocities with time step ``delta_t``.

    Fixes in this revision:
      * repaired two lines mangled by stray ``|`` extraction artifacts
        (``atom.set_new_velocity(...)`` and ``def set_new_accelerations``,
        names confirmed by their call sites in run_simulation);
      * output files are now managed by ``with`` so they are closed even
        when the instability check aborts via sys.exit();
      * single-argument prints are parenthesized (works on Py2 and Py3).
    """
    def __init__(self, atoms, bonds, bond_spring_k, nonbond_spring_k, atomic_mass, delta_t):
        self.atoms = atoms
        self.bonds = bonds
        self.bond_spring_k = bond_spring_k
        self.nonbond_spring_k = nonbond_spring_k
        self.atomic_mass = atomic_mass
        self.delta_t = delta_t

    def set_new_velocities_and_positions(self):
        """First half-kick plus drift for every atom."""
        for atom in self.atoms:
            atom.set_new_velocity(self.delta_t)
            atom.set_new_position(self.delta_t)

    def set_new_velocities(self):
        """Second half-kick for every atom."""
        for atom in self.atoms:
            atom.set_new_velocity(self.delta_t)

    def set_new_accelerations(self):
        """Recompute every atom's acceleration from its force vector."""
        for atom in self.atoms:
            atom.set_new_acceleration()

    def calculate_potential_energy(self):
        """Return [bonded, non-bonded] spring potential energies.

        Side effect: caches the scalar spring force on each interaction
        for the subsequent calculate_forces() pass.
        """
        bond_potential_energy = 0.0
        non_bond_potential_energy = 0.0
        for interaction in self.bonds:
            atom1 = self.atoms[interaction.atom1_id]
            atom2 = self.atoms[interaction.atom2_id]
            distance = get_distance(atom1.get_position(), atom2.get_position())
            stretch = distance - interaction.ref_distance
            spring_k = (self.bond_spring_k if interaction.isBond
                        else self.nonbond_spring_k)
            energy = 0.5 * spring_k * stretch ** 2
            if interaction.isBond:
                bond_potential_energy += energy
            else:
                non_bond_potential_energy += energy
            interaction.force = spring_k * stretch
        return [bond_potential_energy, non_bond_potential_energy]

    def calculate_kinetic_energy(self):
        """Return the total kinetic energy 0.5*m*|v|^2 summed over atoms."""
        kinetic_energy = 0.0
        for atom in self.atoms:
            # One get_velocity() call instead of one per component.
            speed_squared = sum(v * v for v in atom.get_velocity())
            kinetic_energy += 0.5 * atom.mass * speed_squared
        return kinetic_energy

    def calculate_forces(self):
        """Rebuild every atom's force vector from cached interaction forces."""
        for atom in self.atoms:
            atom.force_vec = [0, 0, 0]
            for interaction in atom.interactions:
                other_id = (interaction.atom2_id
                            if interaction.atom1_id == atom.id
                            else interaction.atom1_id)
                other_atom = self.atoms[other_id]
                atom.add_new_force(interaction.force, other_atom.get_position())

    def run_simulation(self, num_iterations, erg_filename, rvc_filename, first_line):
        """Run the integrator, writing energies (.erg) and snapshots (.rvc).

        Aborts via sys.exit() when the total energy leaves the window
        [E0/10, E0*10] around the first iteration's energy, which signals
        an unstable integration. Energies and snapshots are recorded
        every 10th iteration.
        """
        with open(erg_filename, 'w') as erg_output, \
                open(rvc_filename, 'w') as rvc_output:
            erg_output.write("# step\tE_k\tE_b\tE_nB\tE_tot\n")
            rvc_output.write(first_line)
            self.write_out_atoms(rvc_output, 0, 0)
            inital_energy = lower_bound = upper_bound = 0
            for run in range(1, num_iterations + 1):
                self.set_new_velocities_and_positions()
                bond_energy, non_bond_energy = self.calculate_potential_energy()
                self.calculate_forces()
                self.set_new_accelerations()
                self.set_new_velocities()
                kinetic_energy = self.calculate_kinetic_energy()
                total_energy = bond_energy + non_bond_energy + kinetic_energy
                if run == 1:
                    # Record initial energy so we can check for instability
                    # in subsequent runs.
                    inital_energy = total_energy
                    lower_bound = inital_energy / 10.0
                    upper_bound = inital_energy * 10.0
                if total_energy < lower_bound or total_energy > upper_bound:
                    print("Oh dear! We are on run " + str(run) + " and this simulation has become most unstable!")
                    print("Inital energy was: " + "{0:.1f}".format(inital_energy))
                    print("Current energy is: " + "{0:.1f}".format(total_energy))
                    print("I fear I must exit! Ta-ta!")
                    sys.exit()
                if run % 10 == 0:
                    to_write = str(run + 1) + "\t"
                    to_write += "{0:.1f}".format(kinetic_energy) + "\t"
                    to_write += "{0:.1f}".format(bond_energy) + "\t"
                    to_write += "{0:.1f}".format(non_bond_energy) + "\t"
                    to_write += "{0:.1f}".format(total_energy) + "\n"
                    erg_output.write(to_write)
                    self.write_out_atoms(rvc_output, run, total_energy)

    def print_atom_dist(self):
        """Debug helper: print the distance between atoms 17 and 96 (1-based)."""
        atom1 = self.atoms[17 - 1]
        atom2 = self.atoms[96 - 1]
        dist = get_distance(atom1.get_position(), atom2.get_position())
        print("{0:.4f}".format(dist))

    def write_out_atoms(self, output, step, energy):
        """Append one tab-separated snapshot of every atom to ``output``."""
        to_write = ""
        if step > 0:
            # The initial (step 0) snapshot carries no energy header line.
            to_write += "#At time step: " + str(step) + ", energy = " + "{0:.3f}".format(energy) + "kJ\n"
        for atom in self.atoms:
            fields = [str(atom.id + 1)]
            fields += ["{0:.4f}".format(c) for c in atom.get_position()]
            fields += ["{0:.4f}".format(c) for c in atom.get_velocity()]
            fields += [str(bonded_id + 1) for bonded_id in atom.bonded_atoms]
            to_write += "\t".join(fields) + "\n"
        output.write(to_write)
def get_atoms_from_file(filename, atomic_mass):
    """Parse an .rvc snapshot file into Atom objects.

    The first line is a header; its whitespace is normalised to single
    tabs and it is returned alongside the parsed atoms.

    :param filename: path of the input file
    :param atomic_mass: mass assigned to every parsed atom
    :return: [atoms, normalised_header_line]
    """
    handle = open(filename, 'r')
    header = handle.readline()
    normalised_header = "\t".join(header.split()) + "\n"
    atoms = []
    # Remaining lines: id, 3 position floats, 3 velocity floats, bond ids.
    for line in handle:
        fields = line.split()
        atoms.append(Atom(int(fields[0]),
                          float(fields[1]), float(fields[2]), float(fields[3]),
                          float(fields[4]), float(fields[5]), float(fields[6]),
                          atomic_mass,
                          [int(x) for x in fields[7:]]))
    handle.close()
    return [atoms, normalised_header]
def get_args(args):
parser = argparse.Arg |
vprnet/timeline | app/_config.py | Python | mit | 840 | 0.002381 | import os
import inspect
# Frozen Flask
FREEZER_DEFAULT_MIMETYPE = 'text/html'
FREEZER_IGNORE_MIMETYPE_WARNINGS = True
# Amazon S3 Settings
AWS_KEY = ''
AWS_SECRET_KEY = ''
AWS_BUCKET = 'www.vpr.net'
AWS_DIRECTORY = 'sandbox/timeline'
# Cache Settings (units in seconds)
STATIC_EXPIRES = 60 * 24 * 3600
HTML_EXPIRES = 3600
# Upload Settings (ignores anything included below)
IGNORE_DIRECTORIES = ['.git', 'venv', 'sass', 'templates', 'gimp', 'node_modules']
IGNORE_FILES = ['.DS_Store']
IGNORE_FILE_TYPES = ['.gz', '.p | yc', '.py', '.rb', '.md']
# Always | AWS_DIRECTORY for VPR projects
if AWS_DIRECTORY:
BASE_URL = 'http://' + AWS_BUCKET + '/' + AWS_DIRECTORY
FREEZER_BASE_URL = BASE_URL
else:
BASE_URL = 'http://' + AWS_BUCKET
ABSOLUTE_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/'
|
antoinecarme/pyaf | tests/artificial/transf_Fisher/trend_LinearTrend/cycle_30/ar_12/test_artificial_128_Fisher_LinearTrend_30_12_20.py | Python | bsd-3-clause | 265 | 0.086792 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = | 30, transform = "Fisher", si | gma = 0.0, exog_count = 20, ar_order = 12); |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/cherrypy/cherrypy/tutorial/tut05_derived_objects.py | Python | bsd-3-clause | 2,291 | 0.003492 | """
Tutorial - Object inheritance
You are free to derive your request handler classes from any base
class you wish. In most real-world applications, you will probably
want to create a central base class used for all your pages, which takes
care of things like printing a common page header and footer.
"""
import cherrypy
class Page:
# Store the page title in a class attribute
title = 'Untitled Page'
def header(self):
return '''
<html>
<head>
<title>%s</title>
<head>
<body>
<h2>%s</h2>
''' % (self.title, self.title)
def footer(self):
return '''
</bo | dy>
</html>
'''
# Note that header and footer don't get their exposed attributes
# set to True. This isn't necessary since the user isn't supposed
# t | o call header or footer directly; instead, we'll call them from
# within the actually exposed handler methods defined in this
# class' subclasses.
class HomePage(Page):
    """Root handler; inherits header()/footer() from Page and mounts
    AnotherPage under /another/."""
    # Different title for this page
    title = 'Tutorial 5'
    def __init__(self):
        # create a subpage
        self.another = AnotherPage()
    def index(self):
        # Note that we call the header and footer methods inherited
        # from the Page class!
        return self.header() + '''
        <p>
        Isn't this exciting? There's
        <a href="./another/">another page</a>, too!
        </p>
        ''' + self.footer()
    index.exposed = True
class AnotherPage(Page):
    """Second page, reachable at /another/; reuses Page's header/footer."""
    title = 'Another Page'
    def index(self):
        return self.header() + '''
        <p>
        And this is the amazing second page!
        </p>
        ''' + self.footer()
    index.exposed = True
import os.path
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HomePage(), config=tutconf)
else:
# This branch is for the test suite; you can ignore it.
cherrypy.tree.mount(HomePage(), config=tutconf)
|
qateam123/eq | tests/integration/questionnaire/test_questionnaire_question_guidance.py | Python | mit | 4,682 | 0.002563 | from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
class TestQuestionnaireQuestionGuidance(IntegrationTestCase):
def test_question_guidance(self):
# Given I launch a questionnaire with various guidance
token = create_token('question_guidance', 'test')
resp = self.client.get('/session?token=' + token.decode(), follow_redirects=Fals | e)
# When we navigate to the block with all guidance features enabled
resp_url, resp = self.postRedirectGet(resp.location, {'action[start_questionnaire]': ''})
# Then we are presented with the question guidance with all features enabled
self.assertIn('block-test-guidance-all', resp_url)
content = resp.get_data(True)
self.assertIn('Test guidance all', content)
self.assertIn('>Include<', content)
self.assertIn('>Item Include 1<', content)
| self.assertIn('>Item Include 2<', content)
self.assertIn('>Item Include 3<', content)
self.assertIn('>Item Include 4<', content)
self.assertIn('>Exclude<', content)
self.assertIn('>Item Exclude 1<', content)
self.assertIn('>Item Exclude 2<', content)
self.assertIn('>Item Exclude 3<', content)
self.assertIn('>Item Exclude 4<', content)
self.assertIn('>Other<', content)
self.assertIn('>Item Other 1<', content)
self.assertIn('>Item Other 2<', content)
self.assertIn('>Item Other 3<', content)
self.assertIn('>Item Other 4<', content)
self.assertIn('Guidance <b>include</b> description text', content)
self.assertIn('Guidance <b>exclude</b> description text', content)
self.assertIn('Guidance <b>other</b> description text', content)
self.assertIn('Text question', content)
self.assertIn('<input', content)
# When we continue to the next page with combinations of the guidance title
resp_url, resp = self.postRedirectGet(resp_url, {'action[save_continue]': ''})
# Then I am presented with the title guidance correctly
self.assertIn('block-test-guidance-title', resp_url)
content = resp.get_data(True)
self.assertIn('This one has a description but no list', content)
self.assertIn('No list items below this text', content)
self.assertIn('This one has no list or description', content)
self.assertIn('Text question', content)
self.assertIn('<input', content)
# When we continue to the next page with combinations of the guidance descriptions
resp_url, resp = self.postRedirectGet(resp_url, {'action[save_continue]': ''})
# Then I am presented with the description guidance correctly
self.assertIn('block-test-guidance-description', resp_url)
content = resp.get_data(True)
self.assertIn('No title above this text, list below', content)
self.assertIn('>Item Include 1<', content)
self.assertIn('>Item Include 2<', content)
self.assertIn('>Item Include 3<', content)
self.assertIn('>Item Include 4<', content)
self.assertIn('Just description, no title above this text, no list below', content)
self.assertIn('Text question', content)
self.assertIn('<input', content)
# When we continue to the next page with combinations of the guidance lists
resp_url, resp = self.postRedirectGet(resp_url, {'action[save_continue]': ''})
# Then I am presented with the lists guidance correctly
self.assertIn('block-test-guidance-lists', resp_url)
content = resp.get_data(True)
self.assertIn('Title, no description, list follows', content)
self.assertIn('>Item Include 1<', content)
self.assertIn('>Item Include 2<', content)
self.assertIn('>Item Include 3<', content)
self.assertIn('>Item Include 4<', content)
self.assertIn('>List with no title or description 1<', content)
self.assertIn('>List with no title or description 2<', content)
self.assertIn('>List with no title or description 3<', content)
self.assertIn('>List with no title or description 4<', content)
self.assertIn('Text question', content)
self.assertIn('<input', content)
# And I can continue to the summary page
resp_url, resp = self.postRedirectGet(resp_url, {'action[save_continue]': ''})
self.assertIn('summary', resp_url)
# And Submit my answers
resp_url, resp = self.postRedirectGet(resp_url, {'action[submit_answers]': ''})
self.assertIn('thank-you', resp_url)
|
Sriee/epi | data_structures/search/bsearch.py | Python | gpl-3.0 | 7,820 | 0.003197 | from collections import Counter
def bsearch(nums, target):
    """Locate the first (leftmost) occurrence of ``target`` in a sorted list.

    Duplicates are handled by remembering a hit and continuing to search
    the left half.

    :param nums: list sorted in ascending order (may contain duplicates)
    :param target: the element to find in the array
    :return: index of the first occurrence, or -1 if absent
    """
    lo, hi = 0, len(nums) - 1
    found = -1
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] == target:
            found = mid          # remember the hit, keep scanning left
            hi = mid - 1
        elif nums[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return found
def search_range(nums, target):
    """Find the first and last index of ``target`` in a sorted list.

    Bug fixed vs. the previous revision: the old inner helper returned a
    ``(l, r, idx)`` tuple, so the function leaked those tuples to the
    caller instead of plain indices (and ``first != -1`` always compared
    a tuple against -1). The helper now returns a bare index.

    :param nums: list sorted in ascending order (may contain duplicates)
    :param target: element to find
    :return: [first, last] indices, or [-1, -1] when absent
    """
    def _bound(lo, hi, leftmost):
        # Binary search that keeps narrowing toward the requested edge.
        hit = -1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] < target:
                lo = mid + 1
            elif nums[mid] > target:
                hi = mid - 1
            else:
                hit = mid
                if leftmost:
                    hi = mid - 1
                else:
                    lo = mid + 1
        return hit

    first = _bound(0, len(nums) - 1, True)
    if first == -1:
        return [-1, -1]
    # The last occurrence can only be at or after the first one.
    return [first, _bound(first, len(nums) - 1, False)]
def search_range2(nums, target):
    """Return the [first, last] index range of ``target`` in a sorted list.

    Two binary searches: one biased toward the left edge, then one biased
    toward the right edge starting from the first hit.
    Returns [-1, -1] when the target does not occur.

    :param nums: list sorted in ascending order
    :param target: element to find
    :return: range of the element as a two-item list
    """
    def _edge(lo, hi, go_left):
        pos = -1
        while lo <= hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] < target:
                lo = mid + 1
            elif nums[mid] > target:
                hi = mid - 1
            else:
                pos = mid
                if go_left:
                    hi = mid - 1
                else:
                    lo = mid + 1
        return pos

    start = _edge(0, len(nums) - 1, True)
    if start == -1:
        return [-1, -1]
    end = _edge(start, len(nums) - 1, False)
    return [start, end if end != -1 else start]
def shortest_completing_words(license_plate, words):
    """Return the shortest word containing every letter of the plate.

    Letters are matched case-insensitively and with multiplicity (digits
    and spaces in the plate are ignored). Ties are broken by the earliest
    qualifying word. Returns None when no word completes the plate.

    Uses Counter's multiset intersection: ``Counter(w) & need == need``
    holds exactly when ``w`` covers every required letter count.

    :param license_plate: alphanumeric license plate string
    :param words: list of candidate words
    :return: shortest completing word, or None
    """
    need = Counter(ch.lower() for ch in license_plate if ch.isalpha())
    best = None
    for candidate in words:
        if (Counter(candidate) & need) == need:
            if best is None or len(candidate) < len(best):
                best = candidate
    return best
def shortest_completing_words_optimized(license_plate, words):
    """Return the shortest word containing every plate letter with counts.

    Avoids building a Counter per candidate word: for each required
    letter the word's own count is compared against the required
    multiplicity.

    Bug fixed vs. the previous revision: the old test
    ``k not in word and v > word.count(k)`` only rejected words missing a
    letter entirely, silently accepting words that contain a required
    letter fewer times than the plate demands. ``word.count(k) < v``
    covers both the missing and the deficient case. (A stray ``|``
    extraction artifact in the assignment line is also repaired.)

    :param license_plate: alphanumeric license plate string
    :param words: list of candidate words
    :return: shortest completing word (earliest on ties), or None
    """
    required = Counter(ch.lower() for ch in license_plate if ch.isalpha())
    result = None
    for word in words:
        if all(word.count(letter) >= count
               for letter, count in required.items()):
            if result is None or len(word) < len(result):
                result = word
    return result
def find_min_rotated_sorted_array(nums):
    """Return the minimum element of a rotated, duplicate-free sorted array.

    Binary search against the last element: when nums[mid] is greater
    than the last element the pivot (minimum) lies strictly right of mid,
    otherwise it is at mid or to its left. O(log n).

    Fixes vs. the previous revision: repaired a stray ``|`` extraction
    artifact in the ``def`` line and hoisted the loop-invariant
    ``nums[len(nums) - 1]`` lookup out of the loop.

    :param nums: non-empty rotated ascending array without duplicates
    :return: the smallest element
    """
    lo, hi = 0, len(nums) - 1
    last = nums[-1]
    while lo <= hi:
        mid = (lo + hi) // 2
        if nums[mid] > last:
            lo = mid + 1
        else:
            hi = mid - 1
    return nums[lo]
def search_matrix(matrix, target):
    """Binary-search a row-major sorted m x n matrix for ``target``.

    Each row is ascending and every row starts after the previous row
    ends, so the matrix behaves as one flat sorted sequence of length
    m * n and a single binary search suffices.

    :param matrix: matrix as a list of equal-length lists
    :param target: element to find
    :return: True if the element is present, False otherwise
    """
    if not matrix:
        return False
    width = len(matrix[0])
    lo, hi = 0, len(matrix) * width - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        row, col = divmod(mid, width)
        value = matrix[row][col]
        if value < target:
            lo = mid + 1
        elif value > target:
            hi = mid - 1
        else:
            return True
    return False
def search_matrix_2D(matrix, target):
    """Search a matrix whose rows AND columns are each sorted ascending.

    Staircase search from the top-right corner: stepping left discards a
    column whose values are all too large, stepping down discards a row
    whose values are all too small, so each comparison eliminates one
    full row or column. O(m + n).

    :param matrix: 2D matrix as a list of lists
    :param target: element to find
    :return: True if the element is found, False otherwise
    """
    if not matrix:
        return False
    row, col = 0, len(matrix[0]) - 1
    while row < len(matrix) and col >= 0:
        value = matrix[row][col]
        if value > target:
            col -= 1
        elif value < target:
            row += 1
        else:
            return True
    return False
|
#!/usr/bin/python3
"""
Copyright (c) 2014 Guillaume Havard - BVS
"""
import tkinter as tk
# NOTE(review): star import kept for backward compatibility; it is what
# provides the Application class used below — confirm against application.py.
from application import *
# Build the Tk root window, mount the Application frame and run the GUI
# event loop until the window is closed.
root = tk.Tk()
app = Application(master=root)
app.mainloop()
|
setsulla/stir | project/magnolia/script/kancolle.old/exercises.py | Python | mit | 1,442 | 0.006241 | import os
import sys
import time
from magnolia.utility import *
from magnolia.utility import LOG as L
from magnolia.script.kancolle import testcase_normal
class TestCase(testcase_normal.TestCase_Normal):
    """Kancolle "exercises" scenario: log in, run exercises, resupply and
    return the fleet home, recording the screen via minicap throughout."""

    def __init__(self, *args, **kwargs):
        super(TestCase, self).__init__(*args, **kwargs)

    @classmethod
    def setUpClass(cls):
        # Marker in the log so individual scenario runs are easy to find.
        L.info("*** Start TestCase : %s *** " % __file__)

    def test_1(self):
        """Run one full exercises cycle; on any failure keep the recording
        as a video for post-mortem and fail the test."""
        L.info("*** Exercises ***")
        try:
            # Start screen capture before interacting with the game.
            self.minicap_start(); time.sleep(2)
            self.assertTrue(self.initialize(self.get("args.deploy")), "Can't Login & Check Start.")
            # Drain any pending expedition-result popups before proceeding.
            while self.expedition_result(): time.sleep(1)
            self.slack_message(self.get("bot.exercises"))
            self.assertTrue(self.exercises(), "Can't Exercises.")
            while self.expedition_result(): time.sleep(1)
            self.assertTrue(self.supply(self.get("args.fleet")), "Can't Supply.")
            self.assertTrue(self.home())
            while self.expedition_result(): time.sleep(1)
            self.minicap_finish(); time.sleep(2)
        except Exception as e:
            # Log the error, stop capture and keep the footage for debugging.
            L.warning(type(e).__name__ + ": " + str(e))
            self.minicap_finish(); time.sleep(2)
            self.minicap_create_video()
            self.fail("Error Ocurred. Please Check the Video.")

    @classmethod
    def tearDownClass(cls):
        L.info("*** End TestCase : %s *** " % __file__)
|
yunojuno/django-perimeter | tests/test_middleware.py | Python | mit | 5,555 | 0.00036 | from unittest import mock
from urllib.parse import urlparse
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from django.test import RequestFactory, TestCase, override_settings
from django.urls import resolve, reverse
from perimeter.middleware import (
PERIMETER_SESSION_KEY,
PerimeterAccessMiddleware,
bypass_perimeter,
check_middleware,
get_access_token,
get_request_token,
set_request_token,
)
from perimeter.models import AccessToken, EmptyToken
@override_settings(PERIMETER_ENABLED=True)
class PerimeterMiddlewareTests(TestCase):
    """Tests for PerimeterAccessMiddleware and its module-level helpers
    (token getters/setters, bypass rules, middleware wiring checks)."""

    def setUp(self):
        self.factory = RequestFactory()
        self.request = self.factory.get("/")
        # spoof the Auth and Session middleware
        self.request.user = User()
        self.request.session = {}
        self.middleware = PerimeterAccessMiddleware()

    def _assertRedirectsToGateway(self, request, query=None):
        """Assert that the middleware 302-redirects to the perimeter gateway
        with the expected (url-encoded) ``next`` query string."""
        query = query or "next=%2F"
        # NB assertRedirects doesn't work!
        resp = self.middleware.process_request(request)
        self.assertEqual(resp.status_code, 302)
        # use a resolver to strip off any querystring params
        resolver = resolve(resp.url)
        self.assertEqual(resolver.url_name, "gateway")
        self.assertEqual(resolver.namespace, "perimeter")
        self.assertEqual(urlparse(resp.url).query, query)

    def test_middleware_disabled(self):
        # With the perimeter disabled the middleware must opt out entirely.
        with mock.patch("perimeter.middleware.PERIMETER_ENABLED", False):
            self.assertRaises(MiddlewareNotUsed, PerimeterAccessMiddleware)

    def test_bypass_perimeter_default(self):
        """Perimeter login urls excluded."""
        request = self.factory.get("/")
        self.assertFalse(bypass_perimeter(request))
        request = self.factory.get(reverse("perimeter:gateway"))
        self.assertTrue(bypass_perimeter(request))

    def test_get_request_token_session(self):
        at = AccessToken.objects.create_access_token()
        self.request.session[PERIMETER_SESSION_KEY] = at.token
        self.assertEqual(get_request_token(self.request), at.token)

    def test_get_request_token_http_header(self):
        at = AccessToken.objects.create_access_token()
        request = self.factory.get("/", HTTP_X_PERIMETER_TOKEN=at.token)
        request.session = {}
        self.assertEqual(get_request_token(request), at.token)

    def test_get_request_token_empty(self):
        token = get_request_token(self.request)
        self.assertIsNone(token)

    def test_set_request_token(self):
        self.assertIsNone(get_request_token(self.request))
        set_request_token(self.request, "foo")
        self.assertEqual(get_request_token(self.request), "foo")

    def test_get_access_token(self):
        at = AccessToken.objects.create_access_token()
        self.request.session[PERIMETER_SESSION_KEY] = at.token
        self.assertEqual(get_access_token(self.request), at)

    def test_access_token_empty(self):
        # No token anywhere on the request -> the EmptyToken sentinel.
        token = get_access_token(self.request)
        self.assertIsInstance(token, EmptyToken)

    def test_check_middleware(self):
        """check_middleware passes the request through when session exists."""
        self.assertEqual(check_middleware(lambda r: r)(self.request), self.request)

    def test_check_middleware_fails(self):
        """Missing request.session should raise ImproperlyConfigured."""
        self.assertEqual(check_middleware(lambda r: r)(self.request), self.request)
        del self.request.session
        with self.assertRaises(ImproperlyConfigured):
            check_middleware(lambda r: r)(self.request)

    def test_missing_session(self):
        """Missing request.session should raise ImproperlyConfigured."""
        del self.request.session
        self.assertRaises(
            ImproperlyConfigured, self.middleware.process_request, self.request
        )

    def test_missing_token(self):
        """AnonymousUser without a token should be denied."""
        self.request.user = AnonymousUser()
        self._assertRedirectsToGateway(self.request)

    def test_invalid_token(self):
        self.request.user = AnonymousUser()
        self.request.session["token"] = "foobar"
        self._assertRedirectsToGateway(self.request)

    def test_valid_token(self):
        """AnonymousUser with a valid session token should pass through."""
        # NOTE(review): despite the docstring, this asserts a redirect —
        # presumably "token" is not PERIMETER_SESSION_KEY and/or a bare
        # AccessToken is not valid; confirm the intended behaviour.
        AccessToken(token="foobar").save()
        self.request.user = AnonymousUser()
        self.request.session["token"] = "foobar"
        self._assertRedirectsToGateway(self.request)

    def test_perimeter_token_header(self):
        """Test that the X-Perimeter-Token header works."""
        AccessToken(token="foobar").save()
        self.request.user = AnonymousUser()
        self._assertRedirectsToGateway(self.request)
        # With the header set the middleware should not redirect.
        self.request.META["HTTP_X_PERIMETER_TOKEN"] = "foobar"
        self.middleware.process_request(self.request)

    def test_next_query_string_set(self):
        """Check `next` query string param is properly encoded"""
        # Simple path
        request = self.factory.get("/somepath/")
        request.user = AnonymousUser()
        request.session = {}
        self._assertRedirectsToGateway(request, query="next=%2Fsomepath%2F")
        # Path with query string
        request = self.factory.get("/somepath/?important=param")
        request.user = AnonymousUser()
        request.session = {}
        self._assertRedirectsToGateway(
            request, query="next=%2Fsomepath%2F%3Fimportant%3Dparam"
        )
|
def extractQuillofkarnikaWordpressCom(item):
	'''
	Parser for 'quillofkarnika.wordpress.com'

	Maps a feed item's tags to a release message; returns None for items
	with no chapter/volume info or previews, False when no tag matches.
	'''
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
	# Skip items with no chapter/volume number, and skip preview posts.
	if not (chp or vol) or "preview" in item['title'].lower():
		return None
	# (tag to look for, series name to emit, translation type)
	tagmap = [
		('Top Giants: Rebirth of the Black-Bellied Wife', 'Top Giants: Rebirth of the Black-Bellied Wife', 'translated'),
		('PRC', 'PRC', 'translated'),
		('Loiterous', 'Loiterous', 'oel'),
	]
	for tagname, name, tl_type in tagmap:
		if tagname in item['tags']:
			return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
return False |
import unittest

# Import every test case so that unittest.main() discovers and runs them all.
from test.test_game_state import TestAvailableActionSets
from test.test_game_state import TestTurnComplete
from test.test_vector import TestVector
from test.test_buttons import TestButtonCollection
from test.test_ai_simple import TestAISimple
from test.test_drawing_surface import TestDrawingSurface
from test.test_game_actions import TestGameActions
from test.test_game_objects import TestGameObjects

if __name__ == '__main__':
    unittest.main()
|
dstufft/warehouse | warehouse/migrations/versions/bf73e785eed9_add_notify_column_to_adminflag.py | Python | apache-2.0 | 1,034 | 0.000967 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add notify column to AdminFlag
Revision ID: bf73e785eed9
Revises: 5dda74213989
Create Date: 2018-03-23 21:20:05.834821
"""
from alembic import op
import sqlalchemy as sa
revision = "bf73e785eed9"
down_revision = "5dda74213989"
def upgrade():
    """Add the non-nullable ``notify`` boolean (server default ``false``)
    to the ``warehouse_admin_flag`` table."""
    notify_column = sa.Column(
        "notify",
        sa.Boolean(),
        server_default=sa.text("false"),
        nullable=False,
    )
    op.add_column("warehouse_admin_flag", notify_column)
def downgrade():
    # Reverse of upgrade(): drop the notify column again.
    op.drop_column("warehouse_admin_flag", "notify")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.