repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
brainwane/zulip
|
zerver/tests/test_thumbnail.py
|
Python
|
apache-2.0
| 17,260
| 0.00197
|
import base64
import urllib
from io import StringIO
import orjson
from django.conf import settings
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
create_s3_buckets,
get_test_image_file,
override_settings,
use_s3_backend,
)
from zerver.lib.upload import upload_backend, upload_emoji_image
from zerver.lib.users import get_api_key
class ThumbnailTest(ZulipTestCase):
    @use_s3_backend
    def test_s3_source_type(self) -> None:
        # Verifies /thumbnail redirects (302) for images stored on the S3
        # backend, for both session auth and standard API auth, and that a
        # non-owner gets a 403.
        def get_file_path_urlpart(uri: str, size: str='') -> str:
            # Expected thumbor URL fragment for an S3-backed image: the image
            # path is base64-encoded, and thumbnails get a size prefix plus a
            # sharpen filter.
            url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/s3'
            sharpen_filter = ''
            if size:
                url_in_result = f'/{size}/{url_in_result}'
                sharpen_filter = ':sharpen(0.5,0.2,true)'
            hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
            return url_in_result % (sharpen_filter, hex_uri)

        create_s3_buckets(
            settings.S3_AUTH_UPLOADS_BUCKET,
            settings.S3_AVATAR_BUCKET)

        hamlet = self.example_user('hamlet')
        self.login_user(hamlet)
        fp = StringIO("zulip!")
        fp.name = "zulip.jpeg"

        result = self.client_post("/json/user_uploads", {'file': fp})
        self.assert_json_success(result)
        json = orjson.loads(result.content)
        self.assertIn("uri", json)
        uri = json["uri"]
        base = '/user_uploads/'
        self.assertEqual(base, uri[:len(base)])

        # The path is passed as a query parameter, so it must be URL-quoted;
        # the leading slash is dropped to match Markdown behaviour.
        quoted_uri = urllib.parse.quote(uri[1:], safe='')

        # Test full size image.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test thumbnail size.
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=thumbnail")
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri, '0x300')
        self.assertIn(expected_part_url, result.url)

        # Test custom emoji urls in Zulip messages.
        user_profile = self.example_user("hamlet")
        image_file = get_test_image_file("img.png")
        file_name = "emoji.png"
        upload_emoji_image(image_file, file_name, user_profile)
        custom_emoji_url = upload_backend.get_emoji_url(file_name, user_profile.realm_id)
        emoji_url_base = '/user_avatars/'
        self.assertEqual(emoji_url_base, custom_emoji_url[:len(emoji_url_base)])
        quoted_emoji_url = urllib.parse.quote(custom_emoji_url[1:], safe='')

        # Test full size custom emoji image (for emoji link in messages case).
        result = self.client_get(f"/thumbnail?url={quoted_emoji_url}&size=full")
        self.assertEqual(result.status_code, 302, result)
        self.assertIn(custom_emoji_url, result.url)

        # Tests the /api/v1/thumbnail api endpoint with standard API auth
        self.logout()
        result = self.api_get(
            hamlet,
            f'/thumbnail?url={quoted_uri}&size=full')
        self.assertEqual(result.status_code, 302, result)
        expected_part_url = get_file_path_urlpart(uri)
        self.assertIn(expected_part_url, result.url)

        # Test with another user trying to access image using thumbor.
        self.login('iago')
        result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
        self.assertEqual(result.status_code, 403, result)
        self.assert_in_response("You are not authorized to view this file.", result)
def test_external_source_type(self) -> None:
def run_test_with_image_url(image_url: str) -> None:
# Test full size image.
self.login('hamlet')
quoted_url = urllib.parse.quote(image_url, safe='')
encoded_url = base64.urlsafe_b64encode(image_url.encode()).decode('utf-8')
result = self.client_get(f"/thumbnail?url={quoted_url}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/smart/filters:no_upscale()/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test thumbnail size.
result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test api endpoint with standard API authentication.
self.logout()
user_profile = self.example_user("hamlet")
result = self.api_get(user_profile,
f"/thumbnail?url={quoted_url}&size=thumbnail")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test api endpoint with legacy API authentication.
user_profile = self.example_user("hamlet")
result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail&api_key={get_api_key(user_profile)}")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test a second logged-in user; they should also be able to access it
user_profile = self.example_user("iago")
result = self.client_get(f"/thumbnail?url={quoted_url}&size=thumbnail&api_key={get_api_key(user_profile)}")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/0x300/smart/filters:no_upscale():sharpen(0.5,0.2,true)/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
# Test with another user trying to access image using thumbor.
# File should be always accessible to user in case of external source
self.login('iago')
result = self.client_get(f"/thumbnail?url={quoted_url}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = '/smart/filters:no_upscale()/' + encoded_url + '/source_type/external'
self.assertIn(expected_part_url, result.url)
image_url = 'https://images.foobar.com/12345'
run_test_with_image_url(image_url)
image_url = 'http://images.foobar.com/12345'
run_test_with_image_url(image_url)
image_url = '//images.foobar.com/12345'
run_test_with_image_url(image_url)
def test_local_file_type(self) -> None:
def get_file_path_urlpart(uri: str, size: str='') -> str:
url_in_result = 'smart/filters:no_upscale()%s/%s/source_type/local_file'
sharpen_filter = ''
if size:
url_in_result = f'/{size}/{url_in_result}'
sharpen_filter = ':sharpen(0.5,0.2,true)'
hex_uri = base64.urlsafe_b64encode(uri.encode()).decode('utf-8')
return url_in_result % (sharpen_filter, hex_uri)
self.login('hamlet')
fp = StringIO("zulip!")
fp.name = "zulip.jpeg"
result = self.client_post("/json/user_uploads", {'file': fp})
self.assert_json_success(result)
json = orjson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEqual(base, uri[:len(base)])
# Test full size image.
# We remove the forward slash infront of the `/user_uploads/` to match
# Markdown behaviour.
quoted_uri = urllib.parse.quote(uri[1:], safe='')
result = self.client_get(f"/thumbnail?url={quoted_uri}&size=full")
self.assertEqual(result.status_code, 302, result)
expected_part_url = get_file_pat
|
itucsdb1611/itucsdb1611
|
classes/information.py
|
Python
|
gpl-3.0
| 270
| 0
|
class Information:
    """Plain data holder for a single CV information entry."""

    def __init__(self, objectid, cvid, information_type_id, description):
        # Identifiers linking this entry to its object and CV.
        self.objectid, self.cvid = objectid, cvid
        # Entry kind and free-text body.
        self.information_type_id = information_type_id
        self.description = description
        # Soft-delete flag: new entries start out active (0).
        self.deleted = 0
|
kuiche/chromium
|
net/tools/testserver/testserver.py
|
Python
|
bsd-3-clause
| 36,527
| 0.008104
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP server used for testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
It defaults to living on localhost:8888.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
To shut it down properly, visit localhost:8888/kill.
"""
import base64
import BaseHTTPServer
import cgi
import optparse
import os
import re
import shutil
import SocketServer
import sys
import time
import tlslite
import tlslite.api
import pyftpdlib.ftpserver
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
SERVER_HTTP = 0
SERVER_FTP = 1
# Stream all debug() output goes to; defaults to stderr.
debug_output = sys.stderr

def debug(msg):
  """Write msg plus a newline to the debug stream and flush immediately."""
  # Parameter renamed from 'str', which shadowed the builtin.
  debug_output.write(msg + "\n")
  debug_output.flush()
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
  """A specialization of BaseHTTPServer.HTTPServer that allows it
  to be exited cleanly (by setting its "stop" member to True)."""

  def serve_forever(self):
    """Handle one request at a time until self.stop is set, then close."""
    self.stop = False
    # NOTE(review): nonce is presumably consumed by AuthDigestHandler — confirm.
    self.nonce = None
    # handle_request() blocks until a request arrives, so the stop flag is
    # only re-checked after each request completes.
    while not self.stop:
      self.handle_request()
    self.socket.close()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn, StoppableHTTPServer):
"""This is a specialization of StoppableHTTPerver that add https support."""
def __init__(self, server_address, request_hander_class, cert_path):
s = open(cert_path).read()
x509 = tlslite.api.X509()
x509.parse(s)
self.cert_chain = tlslite.api.X509CertChain([x509])
s = open(cert_path).read()
self.private_key = tlslite.api.parsePEMKey(s, private=True)
self.session_cache = tlslite.api.SessionCache()
StoppableHTTPServer.__init__(self, server_address, request_hander_class)
def handshake(self, tlsConnection):
"""Creates the SSL connection."""
try:
tlsConnection.handshakeServer(certChain=self.cert_chain,
privateKey=self.private_key,
sessionCache=self.session_cache)
tlsConnection.ignoreAbruptClose = True
return True
except tlslite.api.TLSError, error:
print "Handshake failure:", str(error)
return False
class TestPageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  def __init__(self, request, client_address, socket_server):
    """Registers the per-verb handler chains, then delegates to the base
    request handler (which dispatches the request immediately)."""
    # Handlers are tried in order; the Default* handler at the end of each
    # list always matches, so every request gets some response.
    self._connect_handlers = [
      self.RedirectConnectHandler,
      self.ServerAuthConnectHandler,
      self.DefaultConnectResponseHandler]
    self._get_handlers = [
      self.KillHandler,
      self.NoCacheMaxAgeTimeHandler,
      self.NoCacheTimeHandler,
      self.CacheTimeHandler,
      self.CacheExpiresHandler,
      self.CacheProxyRevalidateHandler,
      self.CachePrivateHandler,
      self.CachePublicHandler,
      self.CacheSMaxAgeHandler,
      self.CacheMustRevalidateHandler,
      self.CacheMustRevalidateMaxAgeHandler,
      self.CacheNoStoreHandler,
      self.CacheNoStoreMaxAgeHandler,
      self.CacheNoTransformHandler,
      self.DownloadHandler,
      self.DownloadFinishHandler,
      self.EchoHeader,
      self.EchoAllHandler,
      self.FileHandler,
      self.RealFileWithCommonHeaderHandler,
      self.RealBZ2FileWithCommonHeaderHandler,
      self.AuthBasicHandler,
      self.AuthDigestHandler,
      self.SlowServerHandler,
      self.ContentTypeHandler,
      self.ServerRedirectHandler,
      self.ClientRedirectHandler,
      self.DefaultResponseHandler]
    # POST accepts the POST-specific handlers first, then everything GET does.
    self._post_handlers = [
      self.WriteFile,
      self.EchoTitleHandler,
      self.EchoAllHandler,
      self.EchoHandler] + self._get_handlers

    # File-extension -> MIME type map for FileHandler responses.
    self._mime_types = {
      'gif': 'image/gif',
      'jpeg' : 'image/jpeg',
      'jpg' : 'image/jpeg'
    }
    self._default_mime_type = 'text/html'

    BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
                                                   client_address,
                                                   socket_server)
def _ShouldHandleRequest(self, handler_name):
"""Determines if the path can be handled by the handler.
We consider a handler valid if the path begins with the
handler name. It can optionally be followed by "?*", "/*".
"""
pattern = re.compile('%s($|\?|/).*' % handler_name)
return pattern.match(self.path)
  def GetMIMETypeFromName(self, file_name):
    """Returns the mime type for the specified file_name. So far it only looks
    at the file extension."""

    (shortname, extension) = os.path.splitext(file_name)
    if len(extension) == 0:
      # no extension: fall back to the default type.
      return self._default_mime_type

    # extension starts with a dot, so we need to remove it before the lookup.
    return self._mime_types.get(extension[1:], self._default_mime_type)
  def KillHandler(self):
    """This request handler kills the server, for use when we're done
    with a particular test."""
    if (self.path.find("kill") < 0):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=0')
    self.end_headers()
    self.wfile.write("Time to die")
    # Signal StoppableHTTPServer.serve_forever() to exit after this request.
    self.server.stop = True

    return True
  def NoCacheMaxAgeTimeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and no caching requested."""
    if not self._ShouldHandleRequest("/nocachetime/maxage"):
      return False

    self.send_response(200)
    # max-age=0 forces revalidation on every reuse of the cached entry.
    self.send_header('Cache-Control', 'max-age=0')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
  def NoCacheTimeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and no caching requested."""
    if not self._ShouldHandleRequest("/nocachetime"):
      return False

    self.send_response(200)
    # no-cache: the response may be stored but must be revalidated before use.
    self.send_header('Cache-Control', 'no-cache')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
  def CacheTimeHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows caching for one minute."""
    if not self._ShouldHandleRequest("/cachetime"):
      return False

    self.send_response(200)
    self.send_header('Cache-Control', 'max-age=60')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
  def CacheExpiresHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and set the page to expire on 1 Jan 2099."""
    if not self._ShouldHandleRequest("/cache/expires"):
      return False

    self.send_response(200)
    # Uses the HTTP/1.0 Expires header instead of Cache-Control.
    self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
    self.send_header('Content-type', 'text/html')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
  def CacheProxyRevalidateHandler(self):
    """This request handler yields a page with the title set to the current
    system time, and allows caching for 60 seconds (shared caches must
    revalidate once stale)."""
    if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
      return False

    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
    self.end_headers()

    self.wfile.write('<html><head><title>%s</title></head></html>' %
                     time.time())

    return True
def CachePrivateHandler(self):
"""This request handler yields a page with the title set to the current
system time, and allows caching for 5 seconds."""
if not self._ShouldHandleRequest("/cache/private"):
return False
self.send_res
|
Kami/libcloud
|
libcloud/common/types.py
|
Python
|
apache-2.0
| 6,989
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import Callable
from typing import Union
from typing import cast
from enum import Enum
if False:
# Work around for MYPY for cyclic import problem
from libcloud.compute.base import BaseDriver
__all__ = [
"Type",
"LibcloudError",
"MalformedResponseError",
"ProviderError",
"InvalidCredsError",
"InvalidCredsException",
"LazyList"
]
class Type(str, Enum):
    """String-valued enum base that also compares equal to plain strings."""

    @classmethod
    def tostring(cls, value):
        # type: (Union[Enum, str]) -> str
        """Return the string representation of the state object attribute
        :param str value: the state object to turn into string
        :return: the uppercase string that represents the state object
        :rtype: str
        """
        value = cast(Enum, value)
        return str(value._value_).upper()

    @classmethod
    def fromstring(cls, value):
        # type: (str) -> str
        """Return the state object attribute that matches the string
        :param str value: the string to look up
        :return: the state object attribute that matches the string
        :rtype: str
        """
        # Returns None (not an exception) when there is no matching member.
        return getattr(cls, value.upper(), None)

    """
    NOTE: These methods are here for backward compatibility reasons where
    Type values were simple strings and Type didn't inherit from Enum.
    """

    def __eq__(self, other):
        # Compare equal both to other Type members and to plain strings,
        # mirroring the legacy string-constant behaviour.
        if isinstance(other, Type):
            return other.value == self.value
        elif isinstance(other, str):
            return self.value == other
        return super(Type, self).__eq__(other)

    def upper(self):
        return self.value.upper()  # pylint: disable=no-member

    def lower(self):
        return self.value.lower()  # pylint: disable=no-member

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return self.value

    def __hash__(self):
        # NOTE(review): identity hash means a member and its equal plain-string
        # value hash differently, so mixed dict/set lookups will miss — this
        # appears deliberate upstream; confirm before changing.
        return id(self)
class LibcloudError(Exception):
    """The base class for other libcloud exceptions.

    :param value: human-readable error description
    :param driver: driver instance the error originated from, if any
    """

    def __init__(self, value, driver=None):
        # type: (str, BaseDriver) -> None
        super(LibcloudError, self).__init__(value)
        self.value = value
        self.driver = driver

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "<LibcloudError in %r %r>" % (self.driver, self.value)
class MalformedResponseError(LibcloudError):
    """Raised when a provider returns a malformed response, e.g. you request
    JSON and the provider returns '<h3>something</h3>' due to some error on
    their side."""

    def __init__(self, value, body=None, driver=None):
        # type: (str, Optional[str], Optional[BaseDriver]) -> None
        # NOTE(review): does not call LibcloudError.__init__, so Exception
        # args stay empty — preserved as-is for backward compatibility.
        self.value = value
        self.driver = driver
        self.body = body

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "<MalformedResponseException in %r %r>: %r" % (
            self.driver, self.value, self.body)
class ProviderError(LibcloudError):
    """
    Raised when a provider returns an error response (HTTP 4xx, 5xx) for a
    request.

    Specific sub types can be derived for errors like
    HTTP 401 : InvalidCredsError
    HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError
    """

    def __init__(self, value, http_code, driver=None):
        # type: (str, int, Optional[BaseDriver]) -> None
        super(ProviderError, self).__init__(value=value, driver=driver)
        # HTTP status code of the failing response.
        self.http_code = http_code

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return repr(self.value)
class InvalidCredsError(ProviderError):
    """Raised when invalid credentials are used on a provider."""

    def __init__(self, value='Invalid credentials with the provider',
                 driver=None):
        # type: (str, Optional[BaseDriver]) -> None
        # Hard-coded 401 instead of http.client constants, which would add
        # ~20ms of import-time overhead.
        super(InvalidCredsError, self).__init__(value, http_code=401,
                                                driver=driver)
# Deprecated alias of :class:`InvalidCredsError`, kept so existing imports
# keep working.
InvalidCredsException = InvalidCredsError
class ServiceUnavailableError(ProviderError):
    """Raised when a provider returns 503 Service Unavailable."""

    def __init__(self, value='Service unavailable at provider', driver=None):
        # type: (str, Optional[BaseDriver]) -> None
        # Hard-coded 503 instead of http.client constants, which would add
        # ~20ms of import-time overhead.
        super(ServiceUnavailableError, self).__init__(value, http_code=503,
                                                      driver=driver)
class LazyList(object):
    """List-like container that fetches its contents on first access.

    ``get_more`` is called as ``get_more(last_key=..., value_dict=...)`` and
    must return ``(items, last_key, exhausted)``; it is invoked repeatedly
    until it reports exhaustion.
    """

    def __init__(self, get_more, value_dict=None):
        # type: (Callable, Optional[dict]) -> None
        self._data = []  # type: list
        self._last_key = None
        self._exhausted = False
        self._all_loaded = False
        self._get_more = get_more
        self._value_dict = value_dict or {}

    def __iter__(self):
        if not self._all_loaded:
            self._load_all()
        return iter(self._data)

    def __getitem__(self, index):
        # Only trigger loading when the requested index is out of range of
        # what we already have.
        if index >= len(self._data) and not self._all_loaded:
            self._load_all()
        return self._data[index]

    def __len__(self):
        self._load_all()
        return len(self._data)

    def __repr__(self):
        self._load_all()
        return "[%s]" % ", ".join(repr(item) for item in self._data)

    def _load_all(self):
        # Drain the paginated source, then mark the list complete so later
        # calls are no-ops.
        while not self._exhausted:
            items, self._last_key, self._exhausted = self._get_more(
                last_key=self._last_key, value_dict=self._value_dict)
            self._data.extend(items)
        self._all_loaded = True
|
nint8835/NintbotForDiscordV2
|
NintbotForDiscord/Enums.py
|
Python
|
mit
| 1,656
| 0
|
from enum import Enum


class EventType(Enum):
    """Enum containing the various types of events that can occur."""

    # Client lifecycle.
    CLIENT_READY = "CLIENT_READY"
    CLIENT_RESUMED = "CLIENT_RESUMED"

    # Messages.
    MESSAGE_RECEIVED = "MESSAGE_RECEIVED"
    SERVER_MESSAGE_RECEIVED = "SERVER_MESSAGE_RECEIVED"
    PRIVATE_MESSAGE_RECEIVED = "PRIVATE_MESSAGE_RECEIVED"
    COMMAND_RECEIVED = "COMMAND_RECEIVED"
    MESSAGE_DELETED = "MESSAGE_DELETED"
    MESSAGE_EDITED = "MESSAGE_EDITED"

    # Channels.
    CHANNEL_DELETED = "CHANNEL_DELETED"
    CHANNEL_CREATED = "CHANNEL_CREATED"
    CHANNEL_UPDATED = "CHANNEL_UPDATED"

    # Members.
    MEMBER_JOINED = "MEMBER_JOINED"
    MEMBER_REMOVED = "MEMBER_REMOVED"
    MEMBER_UPDATED = "MEMBER_UPDATED"
    MEMBER_BANNED = "MEMBER_BANNED"
    MEMBER_UNBANNED = "MEMBER_UNBANNED"
    MEMBER_TYPING = "MEMBER_TYPING"

    # Servers and roles.
    SERVER_JOINED = "SERVER_JOINED"
    SERVER_REMOVED = "SERVER_REMOVED"
    SERVER_UPDATED = "SERVER_UPDATED"
    ROLE_CREATED = "ROLE_CREATED"
    ROLE_DELETED = "ROLE_DELETED"
    ROLE_UPDATED = "ROLE_UPDATED"
    SERVER_AVAILABLE = "SERVER_AVAILABLE"
    SERVER_UNAVAILABLE = "SERVER_UNAVAILABLE"

    # Voice, reactions, groups, emojis.
    VOICE_STATE_UPDATED = "VOICE_STATE_UPDATED"
    REACTION_ADDED = "REACTION_ADDED"
    REACTION_REMOVED = "REACTION_REMOVED"
    REACTIONS_CLEARED = "REACTIONS_CLEARED"
    MEMBER_JOINED_GROUP = "MEMBER_JOINED_GROUP"
    MEMBER_REMOVED_FROM_GROUP = "MEMBER_REMOVED_FROM_GROUP"
    SERVER_EMOJIS_UPDATED = "SERVER_EMOJIS_UPDATED"
class RedisStorageScope(Enum):
    """Enum containing the possible Redis storage scopes."""

    GLOBAL = "GLOBAL"
    PLUGIN = "PLUGIN"
    SERVER = "SERVER"
    CHANNEL = "CHANNEL"
    USER = "USER"
|
kikinteractive/MaxMind-DB-Reader-python
|
setup.py
|
Python
|
apache-2.0
| 5,415
| 0.000554
|
import os
import re
import sys
# This import is apparently needed for Nose on Red Hat's Python
import multiprocessing
from distutils.command.build_ext import build_ext
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
try:
from setuptools import setup, Extension, Feature
except ImportError:
from distutils.core import setup, Extension
Feature = None
# distutils command overrides, filled in below with ve_build_ext.
cmdclass = {}

PYPY = hasattr(sys, 'pypy_version_info')
JYTHON = sys.platform.startswith('java')

requirements = []
# The ipaddr package is only needed before Python 3.3 (no stdlib ipaddress).
if sys.version_info[0] == 2 or (sys.version_info[0] == 3
                                and sys.version_info[1] < 3):
    requirements.append('ipaddr')

compile_args = ['-Wall', '-Wextra']
if sys.version_info[0] == 2:
    compile_args.append('-fno-strict-aliasing')

# Optional C extension wrapping libmaxminddb.
ext_module = [
    Extension(
        'maxminddb.extension',
        libraries=['maxminddb'],
        sources=['maxminddb/extension/maxminddb.c'],
        extra_compile_args=compile_args,
    )
]

# Cargo cult code for installing extension with pure Python fallback.
# Taken from SQLAlchemy, but this same basic code exists in many modules.
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    ext_errors += (IOError,)
class BuildFailed(Exception):
    """Raised by ve_build_ext when compiling the C extension fails."""

    def __init__(self):
        self.cause = sys.exc_info()[1]  # work around py 2/3 different syntax
class ve_build_ext(build_ext):
    """build_ext variant that converts expected compiler failures into
    BuildFailed, so setup() can be retried without the C extension."""
    # This class allows C extension building to fail.

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
        except ValueError:
            # this can happen on Windows 64 bit, see Python issue 7511
            if "'path'" in str(sys.exc_info()[1]):  # works with both py 2/3
                raise BuildFailed()
            raise

cmdclass['build_ext'] = ve_build_ext
#
ROOT = os.path.dirname(__file__)

with open(os.path.join(ROOT, 'README.rst'), 'rb') as fd:
    README = fd.read().decode('utf8')

with open(os.path.join(ROOT, 'maxminddb', '__init__.py'), 'rb') as fd:
    maxminddb_text = fd.read().decode('utf8')

# Single-source the license and version strings from maxminddb/__init__.py
# instead of duplicating them here.
LICENSE = re.compile(
    r".*__license__ = '(.*?)'", re.S).match(maxminddb_text).group(1)

VERSION = re.compile(
    r".*__version__ = '(.*?)'", re.S).match(maxminddb_text).group(1)
def status_msgs(*msgs):
    """Print msgs to stdout framed by 75-character asterisk rules."""
    rule = '*' * 75
    print(rule)
    for line in msgs:
        print(line)
    print(rule)
def find_packages(location):
    """Return dotted package names for every maxminddb package directory
    under location (i.e. every directory containing an __init__.py)."""
    packages = []
    prefix_len = len(location.split(os.sep))
    for pkg in ['maxminddb']:
        for dirpath, _subdirs, files in os.walk(os.path.join(location, pkg)):
            if '__init__.py' not in files:
                continue
            # Convert the path below `location` into a dotted module path.
            parts = dirpath.split(os.sep)[prefix_len:]
            packages.append(".".join(parts))
    return packages
def run_setup(with_cext):
    """Invoke setup(), optionally including the C extension.

    When with_cext is true, the extension is attached either as a setuptools
    Feature (if available) or as a plain ext_modules entry.
    """
    kwargs = {}
    if with_cext:
        if Feature:
            kwargs['features'] = {'extension': Feature(
                "optional C implementation",
                standard=True,
                ext_modules=ext_module
            )}
        else:
            kwargs['ext_modules'] = ext_module

    setup(
        name='maxminddb',
        version=VERSION,
        description='Python extension for reading the MaxMind DB format',
        long_description=README,
        url='http://www.maxmind.com/',
        # NOTE(review): bugtrack_url is not a standard distutils/setuptools
        # keyword — confirm it is intentional.
        bugtrack_url='https://github.com/maxmind/MaxMind-DB-Reader-python/issues',
        packages=find_packages('.'),
        package_data={'': ['LICENSE']},
        package_dir={'maxminddb': 'maxminddb'},
        include_package_data=True,
        install_requires=requirements,
        tests_require=['nose'],
        test_suite='nose.collector',
        license=LICENSE,
        cmdclass=cmdclass,
        classifiers=(
            'Development Status :: 3 - Alpha',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: Apache Software License',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python',
            'Topic :: Internet :: Proxy Servers',
            'Topic :: Internet',
        ),
        **kwargs
    )
# Entry logic: skip the C extension on platforms that can't build it, and
# fall back to a pure-Python install if compilation fails.
if PYPY or JYTHON:
    run_setup(False)
    status_msgs(
        "WARNING: Disabling C extension due to Python platform.",
        "Plain-Python build succeeded."
    )
else:
    try:
        run_setup(True)
    except BuildFailed as exc:
        # Compilation failed; report the cause and retry without speedups.
        status_msgs(
            exc.cause,
            "WARNING: The C extension could not be compiled, " +
            "speedups are not enabled.",
            "Failure information, if any, is above.",
            "Retrying the build without the C extension now."
        )

        run_setup(False)

        status_msgs(
            "WARNING: The C extension could not be compiled, " +
            "speedups are not enabled.",
            "Plain-Python build succeeded."
        )
|
Kennyl/calibre-web
|
cps/ub.py
|
Python
|
gpl-3.0
| 23,868
| 0.005237
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import *
from sqlalchemy import exc
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *
from flask_login import AnonymousUserMixin
import sys
import os
import logging
from werkzeug.security import generate_password_hash
from flask_babel import gettext as _
import json
import datetime
from binascii import hexlify
# app.db lives next to the package root unless CALIBRE_DBPATH overrides it.
dbpath = os.path.join(os.path.normpath(os.getenv("CALIBRE_DBPATH", os.path.dirname(os.path.realpath(__file__)) + os.sep + ".." + os.sep)), "app.db")
engine = create_engine('sqlite:///{0}'.format(dbpath), echo=False)
Base = declarative_base()

# Permission bits OR-ed together in User.role (ROLE_USER is the no-bits base).
ROLE_USER = 0
ROLE_ADMIN = 1
ROLE_DOWNLOAD = 2
ROLE_UPLOAD = 4
ROLE_EDIT = 8
ROLE_PASSWD = 16
ROLE_ANONYMOUS = 32
ROLE_EDIT_SHELFS = 64
ROLE_DELETE_BOOKS = 128

# Sidebar/detail visibility bits OR-ed together in User.sidebar_view.
DETAIL_RANDOM = 1
SIDEBAR_LANGUAGE = 2
SIDEBAR_SERIES = 4
SIDEBAR_CATEGORY = 8
SIDEBAR_HOT = 16
SIDEBAR_RANDOM = 32
SIDEBAR_AUTHOR = 64
SIDEBAR_BEST_RATED = 128
SIDEBAR_READ_AND_UNREAD = 256
SIDEBAR_RECENT = 512
SIDEBAR_SORTED = 1024

DEFAULT_PASS = "admin123"
DEFAULT_PORT = int(os.environ.get("CALIBRE_PORT", 8083))
DEVELOPMENT = False
class UserBase:
    """Mixin with the access helpers shared by User and Anonymous.

    ``role`` and ``sidebar_view`` are bitmasks built from the module-level
    ROLE_* and SIDEBAR_*/DETAIL_* constants.  Every helper below answers
    "is this bit set?", treating a missing (None) value as "not set".
    Previously half the methods used a verbose
    ``return True if ... else False`` form and half used ``bool(...)``;
    they are now unified through two private helpers.
    """

    @property
    def is_authenticated(self):
        return True

    def _has_role(self, bit):
        # True when the given permission bit is set in self.role.
        return bool((self.role is not None) and (self.role & bit == bit))

    def _shows(self, bit):
        # True when the given visibility bit is set in self.sidebar_view.
        return bool((self.sidebar_view is not None) and
                    (self.sidebar_view & bit == bit))

    def role_admin(self):
        return self._has_role(ROLE_ADMIN)

    def role_download(self):
        return self._has_role(ROLE_DOWNLOAD)

    def role_upload(self):
        return self._has_role(ROLE_UPLOAD)

    def role_edit(self):
        return self._has_role(ROLE_EDIT)

    def role_passwd(self):
        return self._has_role(ROLE_PASSWD)

    def role_anonymous(self):
        return self._has_role(ROLE_ANONYMOUS)

    def role_edit_shelfs(self):
        return self._has_role(ROLE_EDIT_SHELFS)

    def role_delete_books(self):
        return self._has_role(ROLE_DELETE_BOOKS)

    @property
    def is_active(self):
        return True

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        return str(self.id)

    def filter_language(self):
        return self.default_language

    def show_random_books(self):
        return self._shows(SIDEBAR_RANDOM)

    def show_language(self):
        return self._shows(SIDEBAR_LANGUAGE)

    def show_hot_books(self):
        return self._shows(SIDEBAR_HOT)

    def show_recent(self):
        return self._shows(SIDEBAR_RECENT)

    def show_sorted(self):
        return self._shows(SIDEBAR_SORTED)

    def show_series(self):
        return self._shows(SIDEBAR_SERIES)

    def show_category(self):
        return self._shows(SIDEBAR_CATEGORY)

    def show_author(self):
        return self._shows(SIDEBAR_AUTHOR)

    def show_best_rated_books(self):
        return self._shows(SIDEBAR_BEST_RATED)

    def show_read_and_unread(self):
        return self._shows(SIDEBAR_READ_AND_UNREAD)

    def show_detail_random(self):
        return self._shows(DETAIL_RANDOM)

    def __repr__(self):
        return '<User %r>' % self.nickname
# Base class for users in Calibre-Web; settings that depend on a particular user
# are stored here. It is derived from UserBase (all access methods are declared there).
class User(UserBase, Base):
    """Registered user row in app.db ('user' table)."""
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    nickname = Column(String(64), unique=True)
    email = Column(String(120), unique=True, default="")
    # Bitmask of ROLE_* permission bits.
    role = Column(SmallInteger, default=ROLE_USER)
    password = Column(String)
    kindle_mail = Column(String(120), default="")
    shelf = relationship('Shelf', backref='user', lazy='dynamic', order_by='Shelf.name')
    downloads = relationship('Downloads', backref='user', lazy='dynamic')
    locale = Column(String(2), default="en")
    # Bitmask of SIDEBAR_*/DETAIL_* visibility bits; defaults to DETAIL_RANDOM.
    sidebar_view = Column(Integer, default=1)
    default_language = Column(String(3), default="all")
    mature_content = Column(Boolean, default=True)
# Class for the anonymous user; derived from the user base class and completely
# overrides methods and properties for the anonymous user.
class Anonymous(AnonymousUserMixin, UserBase):
    """Flask-Login anonymous user, backed by the DB row flagged ROLE_ANONYMOUS."""

    def __init__(self):
        self.loadSettings()

    def loadSettings(self):
        # Copies settings from the dedicated anonymous user row; note this
        # queries the database on every construction.
        data = session.query(User).filter(User.role.op('&')(ROLE_ANONYMOUS) == ROLE_ANONYMOUS).first()  # type: User
        settings = session.query(Settings).first()
        self.nickname = data.nickname
        self.role = data.role
        self.id = data.id
        self.sidebar_view = data.sidebar_view
        self.default_language = data.default_language
        self.locale = data.locale
        self.mature_content = data.mature_content
        # Whether anonymous browsing is enabled at all (application setting).
        self.anon_browse = settings.config_anonbrowse

    def role_admin(self):
        # The anonymous user can never be an admin, regardless of role bits.
        return False

    @property
    def is_active(self):
        return False

    @property
    def is_anonymous(self):
        # Only "anonymous" in the Flask-Login sense if anonymous browsing is on.
        return self.anon_browse

    @property
    def is_authenticated(self):
        return False
# Base class representing shelves in the calibre-web app.db
class Shelf(Base):
    """A user-owned (optionally public) book shelf."""
    __tablename__ = 'shelf'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    is_public = Column(Integer, default=0)
    user_id = Column(Integer, ForeignKey('user.id'))

    def __repr__(self):
        return '<Shelf %r>' % (self.name,)
# Base class representing the N:M relationship between books and shelves in app.db
class BookShelf(Base):
    """N:M link table connecting books to shelves, with an ordering column."""
    __tablename__ = 'book_shelf_link'

    id = Column(Integer, primary_key=True)
    book_id = Column(Integer)
    order = Column(Integer)
    shelf = Column(Integer, ForeignKey('shelf.id'))

    def __repr__(self):
        return '<Book %r>' % (self.id,)
class ReadBook(Base):
    """Per-user read/unread flag for a single book (app.db)."""
    __tablename__ = 'book_read_link'
    id = Column(Integer, primary_key=True)
    book_id = Column(Integer, unique=False)
    user_id = Column(Integer, ForeignKey('user.id'), unique=False)
    is_read = Column(Boolean, unique=False)
class Bookmark(Base):
    """Per-user bookmark for a book/format pair.

    ``bookmark_key`` is presumably an opaque reader position key — TODO
    confirm against the reader code that writes it.
    """
    __tablename__ = 'bookmark'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    book_id = Column(Integer)
    format = Column(String(collation='NOCASE'))
    bookmark_key = Column(String)
# Baseclass representing Downloads from calibre-web in app.db
class Downloads(Base):
    """Download history: which user downloaded which book (app.db)."""
    __tablename__ = 'downloads'
    id = Column(Integer, primary_key=True)
    book_id = Column(Integer)
    user_id = Column(Integer, ForeignKey('user.id'))

    def __repr__(self):
        # Bug fix: the original format string '<Download %r' was missing
        # the closing '>'.
        return '<Download %r>' % self.book_id
# Baseclass for representing settings in app.db with email server settings and Calibre database settings
# (application settings)
class Settings(Base):
__tablename__ = 'settings'
id = Column(Integer, primary_key=True)
mail_server = Column(String)
mail_port = Col
|
OneDrive/onedrive-sdk-python
|
src/onedrivesdk/model/items_collection_page.py
|
Python
|
mit
| 1,072
| 0.005597
|
# -*- coding
|
: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overw
|
ritten.
'''
from __future__ import unicode_literals
from ..collection_base import CollectionPageBase
from ..model.item import Item
class ItemsCollectionPage(CollectionPageBase):
    """One page of Item results, wrapping the raw property list."""

    def __getitem__(self, index):
        """Return the :class:`Item<onedrivesdk.model.item.Item>` at *index*."""
        return Item(self._prop_list[index])

    def items(self):
        """Lazily yield every :class:`Item<onedrivesdk.model.item.Item>` in this page."""
        return (Item(entry) for entry in self._prop_list)
|
erramuzpe/C-PAC
|
CPAC/median_angle/tests/test_median_angle.py
|
Python
|
bsd-3-clause
| 1,151
| 0.01477
|
def test_median_angle_correct():
    """Integration test for CPAC's median-angle correction.

    Loads a subject's functional image, applies median_angle_correct() and
    prints the median angle (degrees) before and after correction.
    NOTE(review): depends on a hard-coded cluster path; not runnable in
    isolation.
    """
    from CPAC.median_angle import median_angle_correct
    import numpy as np
    import nibabel as nb

    def getY(filepath):
        # (time x voxels) matrix of all voxels that are non-zero somewhere.
        nii = nb.load(filepath)
        data = nii.get_data().astype(np.float64)
        mask = (data != 0).sum(-1) != 0
        return data[mask].T

    def normalize(X):
        # Column-wise zero-mean, unit-norm.
        Xc = X - X.mean(0)
        return Xc / np.sqrt((Xc ** 2).sum(0))

    subject = '/home/data/Projects/nuisance_reliability_paper/working_dir_CPAC_order/resting_preproc/funcpreproc/_session_id_NYU_TRT_session1_subject_id_sub05676/func_scale/mapflow/_func_scale0/lfo_3dc_RPI_3dv_3dc_maths.nii.gz'
    target_angle = 88.0
    Y_orig = normalize(getY(subject))
    U_orig, S, Vh = np.linalg.svd(Y_orig, full_matrices=False)
    corrected_file, angles_file = median_angle_correct(target_angle, subject)
    Y_corr = normalize(getY(corrected_file))
    median_angle_orig = np.median(np.arccos(U_orig[:, 0].T.dot(Y_orig)))
    median_angle_corr = np.median(np.arccos(U_orig[:, 0].T.dot(Y_corr)))
    # Bug fix: was a Python 2 print statement, which is a syntax error on
    # Python 3; use the print() function instead.
    print(median_angle_orig * 180.0 / np.pi, median_angle_corr * 180.0 / np.pi)
|
rafaelsierra/estoudebike-api
|
src/bike_auth/models.py
|
Python
|
apache-2.0
| 376
| 0.00266
|
import uuid
from django.contrib.auth.models import User
from django.db import models
class Token(models.Model):
    """API auth token; the UUID primary key is the opaque token value."""
    key = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    created_at = models.DateTimeField(auto_now_add=True)
    is_active = models.BooleanField(default=True, db_index=True)
    # Bug fix: ForeignKey requires an explicit on_delete since Django 2.0;
    # CASCADE matches the old implicit default, so behavior is unchanged.
    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
| |
seijim/cloud-robotics-fx-v2
|
CloudRoboticsApi/ClientCode_Pepper/HeadWaters/PepperCode2/lib/cloudrobotics/conversation/message.py
|
Python
|
mit
| 746
| 0.002809
|
# -*- coding: utf-8 -*-
#
# Cloud Robotics FX 会話理解API用メッセージ
#
# @author: Osamu Noguchi <noguchi@headwaters.co.jp>
# @version: 0.0.1
import cloudrobotics.message as message
APP_ID = 'SbrApiServices'
PROCESSING_ID = 'RbAppConversationApi'
# Conversation message (sent to the conversation-understanding API)
#
class ConversationMessage(message.CRFXMessage):
    """CRFX message carrying a visitor utterance to the conversation API."""

    def __init__(self, visitor, visitor_id, talkByMe, type):
        super(ConversationMessage, self).__init__()
        # Route the message to the conversation-API processing app.
        self.header['RoutingType'] = message.ROUTING_TYPE_CALL
        self.header['AppProcessingId'] = PROCESSING_ID
        self.header['MessageId'] = type
        self.body = {
            'visitor': visitor,
            'visitor_id': visitor_id,
            'talkByMe': talkByMe,
        }
|
Fleurer/flask-oauthlib
|
example/facebook.py
|
Python
|
bsd-3-clause
| 1,663
| 0
|
from flask import Flask, redirect, url_for, session, request
from flask_oauthlib.client import OAuth, OAuthException
FACEBOOK_APP_ID = '188477911223606'
FACEBOOK_APP_SECRET = '621413ddea2bcc5b2e83d42fc40495de'
app = Flask(__name__)
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)
facebook = oauth.remote_app(
'facebook',
consumer_key=FACEBOOK_APP_ID,
consumer_secret=FACEBOOK_APP_SECRET,
request_token_params={'scope': 'email'},
base_url='https://graph.facebook.com',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth'
)
@app.route('/')
def index():
    """Entry point: immediately send the visitor to the login flow."""
    return redirect(url_for('login'))
@app.route('/login')
def login():
    """Kick off the OAuth dance, remembering where to return afterwards."""
    next_url = request.args.get('next') or request.referrer or None
    callback = url_for('facebook_authorized', next=next_url, _external=True)
    return facebook.authorize(callback=callback)
@app.route('/login/authorized')
def facebook_authorized():
    """OAuth callback: store the access token and greet the user."""
    resp = facebook.authorized_response()
    if resp is None:
        # The user denied access or the provider returned an error.
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    if isinstance(resp, OAuthException):
        return 'Access denied: %s' % resp.message
    # Token tuple format expected by flask-oauthlib's tokengetter.
    session['oauth_token'] = (resp['access_token'], '')
    me = facebook.get('/me')
    return 'Logged in as id=%s name=%s redirect=%s' % \
        (me.data['id'], me.data['name'], request.args.get('next'))
@facebook.tokengetter
def get_facebook_oauth_token():
    """Supply the cached session token when flask-oauthlib signs requests."""
    return session.get('oauth_token')
if __name__ == '__main__':
    # Run the demo app with Flask's built-in development server.
    app.run()
|
rob-b/belt
|
belt/values.py
|
Python
|
bsd-3-clause
| 1,091
| 0
|
import os
import logging
from .axle import split_package_name
logger = logging.getLogger(__name__)
class Path(object):
    """Thin wrapper around a filesystem path string."""

    def __init__(self, path):
        self.path = path

    @property
    def exists(self):
        """True when the wrapped path currently exists on disk."""
        return os.path.exists(self.path)
class ReleaseValue(object):
    """A single release file of a package inside a package directory."""

    # Cached md5 digest text; lazily loaded from the sidecar ``.md5`` file.
    _md5 = ''

    def __init__(self, name, package_dir):
        self.name, self.number = split_package_name(name)
        self.fullname = name
        self.package_dir = package_dir

    def __eq__(self, other):
        # Releases compare equal to their full filename.
        return self.fullname == other

    def __repr__(self):
        return self.fullname

    @property
    def md5(self):
        """Contents of the sidecar ``<fullname>.md5`` file, or '' if absent."""
        if not self._md5:
            hash_name = self.fullpath + '.md5'
            try:
                with open(hash_name) as fh:
                    self._md5 = fh.read()
            except IOError:
                # Missing hash file is tolerated; callers get ''.
                pass
        return self._md5

    @property
    def fullpath(self):
        """Absolute location of this release inside the package directory."""
        return os.path.join(self.package_dir, self.fullname)
|
vuolter/pyload
|
src/pyload/core/managers/event_manager.py
|
Python
|
agpl-3.0
| 3,084
| 0
|
# -*- coding: utf-8 -*-
import time
from ..utils.purge import uniquify
class EventManager:
    """Collects UI events and fans them out to connected clients."""

    def __init__(self, core):
        self.pyload = core
        self._ = core._
        self.clients = []

    def new_client(self, uuid):
        """Register a new client connection identified by *uuid*."""
        self.clients.append(Client(uuid))

    def clean(self):
        """Drop clients inactive for more than 30 seconds.

        Bug fix: the original deleted entries from ``self.clients`` while
        enumerating it, which skips the element following every deletion
        and can delete the wrong client once indices shift. Rebuilding the
        list avoids both problems.
        """
        cutoff = time.time() - 30
        self.clients = [c for c in self.clients if c.last_active >= cutoff]

    def get_events(self, uuid):
        """Return pending events for *uuid*, registering unknown clients."""
        events = []
        valid_uuid = False
        for client in self.clients:
            if client.uuid == uuid:
                client.last_active = time.time()
                valid_uuid = True
                while client.new_events():
                    events.append(client.pop_event().to_list())
                break
        if not valid_uuid:
            # First contact: register the client and force a full reload.
            self.new_client(uuid)
            events = [
                ReloadAllEvent("queue").to_list(),
                ReloadAllEvent("collector").to_list(),
            ]
        return uniquify(events)  # return uniquify(events, repr)

    def add_event(self, event):
        """Broadcast *event* to every connected client."""
        for client in self.clients:
            client.add_event(event)
class Client:
    """Per-connection FIFO event queue, keyed by uuid."""

    def __init__(self, uuid):
        self.uuid = uuid
        self.last_active = time.time()
        self.events = []

    def new_events(self):
        """True when at least one event is queued."""
        return bool(self.events)

    def pop_event(self):
        """Remove and return the oldest queued event, or None if empty."""
        if not self.events:
            return None
        return self.events.pop(0)

    def add_event(self, event):
        self.events.append(event)
class UpdateEvent:
    """Signals that a pack/file in the queue or collector changed."""

    def __init__(self, itype, iid, destination):
        assert itype in ("pack", "file")
        assert destination in ("queue", "collector")
        self.type = itype
        self.id = iid
        self.destination = destination

    def to_list(self):
        """Wire format consumed by the UI client."""
        return ["update", self.destination, self.type, self.id]
class RemoveEvent:
    """Signals that a pack/file was removed from the queue or collector."""

    def __init__(self, itype, iid, destination):
        assert itype in ("pack", "file")
        assert destination in ("queue", "collector")
        self.type = itype
        self.id = iid
        self.destination = destination

    def to_list(self):
        """Wire format consumed by the UI client."""
        return ["remove", self.destination, self.type, self.id]
class InsertEvent:
    """Signals that a pack/file was inserted after a given element."""

    def __init__(self, itype, iid, after, destination):
        assert itype in ("pack", "file")
        assert destination in ("queue", "collector")
        self.type = itype
        self.id = iid
        self.after = after
        self.destination = destination

    def to_list(self):
        """Wire format consumed by the UI client."""
        return ["insert", self.destination, self.type, self.id, self.after]
class ReloadAllEvent:
    """Tells the client to reload an entire destination view."""

    def __init__(self, destination):
        assert destination in ("queue", "collector")
        self.destination = destination

    def to_list(self):
        """Wire format consumed by the UI client."""
        return ["reload", self.destination]
class AccountUpdateEvent:
    """Tells the client that account data changed."""

    def to_list(self):
        return ["account"]
class ConfigUpdateEvent:
    """Tells the client that the configuration changed."""

    def to_list(self):
        return ["config"]
|
crmccreary/openerp_server
|
openerp/addons/board/__init__.py
|
Python
|
agpl-3.0
| 1,082
| 0.001848
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You sho
|
uld have received a copy of the GNU Affero Gen
|
eral Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/bar/hoverlabel/_namelengthsrc.py
|
Python
|
mit
| 432
| 0.002315
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
|
self, plotly_name="namelengthsrc", parent_name="bar.hoverlabel", **kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
|
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
mrjacobagilbert/gnuradio
|
grc/core/generator/hier_block.py
|
Python
|
gpl-3.0
| 6,202
| 0.000967
|
import collections
import os
import codecs
from .top_block import TopBlockGenerator
from .. import Constants
from ..io import yaml
class HierBlockGenerator(TopBlockGenerator):
    """Extends the top block generator to also generate a block YML file"""

    def __init__(self, flow_graph, _):
        """
        Initialize the hier block generator object.

        Args:
            flow_graph: the flow graph object
        """
        platform = flow_graph.parent
        output_dir = platform.config.hier_block_lib_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        TopBlockGenerator.__init__(self, flow_graph, output_dir)
        self._mode = Constants.HIER_BLOCK_FILE_MODE
        # The YML block description lives next to the generated .py file.
        self.file_path_yml = self.file_path[:-3] + '.block.yml'

    def write(self):
        """generate output and write it to files"""
        TopBlockGenerator.write(self)
        data = yaml.dump(self._build_block_n_from_flow_graph_io())
        # Insert a blank line before each top-level YML section for readability.
        replace = [
            ('parameters:', '\nparameters:'),
            ('inputs:', '\ninputs:'),
            ('outputs:', '\noutputs:'),
            ('asserts:', '\nasserts:'),
            ('templates:', '\ntemplates:'),
            ('documentation:', '\ndocumentation:'),
            ('file_format:', '\nfile_format:'),
        ]
        for r in replace:
            data = data.replace(*r)
        with codecs.open(self.file_path_yml, 'w', encoding='utf-8') as fp:
            fp.write(data)
        # Windows only supports S_IREAD and S_IWRITE, other flags are ignored
        os.chmod(self.file_path_yml, self._mode)

    def _build_block_n_from_flow_graph_io(self):
        """
        Generate a block YML nested data from the flow graph IO

        Returns:
            a yml node tree
        """
        # Extract info from the flow graph
        block_id = self._flow_graph.get_option('id')
        parameters = self._flow_graph.get_parameters()

        def var_or_value(name):
            # Parameter names become template variables; literals pass through.
            if name in (p.name for p in parameters):
                return "${" + name + " }"
            return name

        # Build the nested data
        data = collections.OrderedDict()
        data['id'] = block_id
        data['label'] = (
            self._flow_graph.get_option('title') or
            self._flow_graph.get_option('id').replace('_', ' ').title()
        )
        data['category'] = self._flow_graph.get_option('category')
        # Parameters
        data['parameters'] = []
        for param_block in parameters:
            p = collections.OrderedDict()
            p['id'] = param_block.name
            p['label'] = param_block.params['label'].get_value() or param_block.name
            p['dtype'] = param_block.params['value'].dtype
            p['default'] = param_block.params['value'].get_value()
            p['hide'] = param_block.params['hide'].get_value()
            data['parameters'].append(p)
        # Ports
        for direction in ('inputs', 'outputs'):
            data[direction] = []
            for port in get_hier_block_io(self._flow_graph, direction):
                p = collections.OrderedDict()
                p['label'] = port.parent.params['label'].value
                if port.domain != Constants.DEFAULT_DOMAIN:
                    p['domain'] = port.domain
                p['dtype'] = port.dtype
                if port.domain != Constants.GR_MESSAGE_DOMAIN:
                    # vlen may reference a parameter of the hier block.
                    p['vlen'] = var_or_value(port.vlen)
                if port.optional:
                    p['optional'] = True
                data[direction].append(p)
        t = data['templates'] = collections.OrderedDict()
        t['imports'] = "from {0} import {0} # grc-generated hier_block".format(
            self._flow_graph.get_option('id'))
        # Make data
        if parameters:
            t['make'] = '{cls}(\n    {kwargs},\n)'.format(
                cls=block_id,
                kwargs=',\n    '.join(
                    '{key}=${{ {key} }}'.format(key=param.name) for param in parameters
                ),
            )
        else:
            t['make'] = '{cls}()'.format(cls=block_id)
        # Self-connect if there aren't any ports
        if not data['inputs'] and not data['outputs']:
            t['make'] += '\nself.connect(self.${id})'
        # Callback data
        t['callbacks'] = [
            'set_{key}(${{ {key} }})'.format(key=param_block.name) for param_block in parameters
        ]
        # Documentation
        data['documentation'] = "\n".join(field for field in (
            self._flow_graph.get_option('author'),
            self._flow_graph.get_option('description'),
            self.file_path
        ) if field)
        data['grc_source'] = str(self._flow_graph.grc_file_path)
        data['file_format'] = 1
        return data
class QtHierBlockGenerator(HierBlockGenerator):
    """Hier block generator for flow graphs containing QT GUI widgets."""

    def _build_block_n_from_flow_graph_io(self):
        base = HierBlockGenerator._build_block_n_from_flow_graph_io(self)
        block_n = collections.OrderedDict()
        # Rebuild the mapping so 'flags' lands directly after 'category'.
        for key, value in base.items():
            block_n[key] = value
            if key == 'category':
                block_n['flags'] = 'need_qt_gui'
        label = block_n['label']
        if not label.upper().startswith('QT GUI'):
            block_n['label'] = 'QT GUI ' + label
        # Extra parameter so the user can place the widget in the GUI grid.
        gui_hint_param = collections.OrderedDict()
        gui_hint_param['id'] = 'gui_hint'
        gui_hint_param['label'] = 'GUI Hint'
        gui_hint_param['dtype'] = 'gui_hint'
        gui_hint_param['hide'] = 'part'
        block_n['parameters'].append(gui_hint_param)
        block_n['templates']['make'] += (
            "\n<% win = 'self.%s'%id %>"
            "\n${ gui_hint() % win }"
        )
        return block_n
def get_hier_block_io(flow_graph, direction, domain=None):
    """
    Yield the io ports of this flow graph.

    Ports come from the pad source/sink blocks; an optional *domain*
    filters the result.
    """
    if direction == 'inputs':
        pads = flow_graph.get_pad_sources()
    else:
        pads = flow_graph.get_pad_sinks()
    for pad in pads:
        ports = pad.sources if direction == 'inputs' else pad.sinks
        for port in ports:
            if domain and port.domain != domain:
                continue
            yield port
|
andreipradan/raspberrymediaplayer
|
src/radio/api/utils.py
|
Python
|
mit
| 328
| 0
|
import subprocess
def send_command(*args):
    """Run a command via subprocess and return its decoded stdout.

    If exactly two args are given and the second contains '&&', the
    compound argument is kept and its split parts are appended after it,
    replacing the original argument list.
    """
    delimiter = '&&'
    if len(args) == 2 and delimiter in args[1]:
        parts = args[1].split(delimiter)
        args = [args[1]] + parts
    process = subprocess.Popen(args, stdout=subprocess.PIPE)
    out, _ = process.communicate()
    return out.decode("utf-8")
|
lukeshingles/artistools
|
artistools/makemodel/__init__.py
|
Python
|
mit
| 65
| 0.015385
|
# import .1dslicefrom3d
import artistools.make
|
model.botyanski2
|
017
|
lwahlmeier/python-litesockets
|
tests/__init__.py
|
Python
|
unlicense
| 24
| 0.041667
|
#fro
|
m . import sslTests
| |
aldenjenkins/foobargamingwebsite
|
paypal/standard/ipn/south_migrations/0006_auto__chg_field_paypalipn_custom__chg_field_paypalipn_transaction_subj.py
|
Python
|
bsd-3-clause
| 14,862
| 0.007872
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'PayPalIPN.custom'
db.alter_column(u'paypal_ipn', 'custom', self.gf('django.db.models.fields.CharField')(max_length=256))
# Changing field 'PayPalIPN.transaction_subject'
db.alter_column(u'paypal_ipn', 'transaction_subject', self.gf('django.db.models.fields.CharField')(max_length=256))
def backwards(self, orm):
# Changing field 'PayPalIPN.custom'
db.alter_column(u'paypal_ipn', 'custom', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'PayPalIPN.transaction_subject'
db.alter_column(u'paypal_ipn', 'transaction_subject', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
u'ipn.paypalipn': {
'Meta': {'object_name': 'PayPalIPN', 'db_table': "u'paypal_ipn'"},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_country_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'address_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'amount_per_cycle': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'auction_buyer_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'auction_closing_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'auction_multi_item': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'auth_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '
|
64', 'decimal_places': '2', 'blank': 'True'}),
'auth_exp': ('django.db.models.fields.CharField', [], {'max_length': '28', 'blank': 'True'}),
'auth_id': ('django.db.models.fields.CharField', [], {'max_length': '19', 'blank': 'True'}),
'auth_status': ('django.db.models.fields.CharField'
|
, [], {'max_length': '255', 'blank': 'True'}),
'business': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'case_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'case_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'charset': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'custom': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '16', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'for_auction': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'from_view': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'handling_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_payment_amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'invoice': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'ipaddress': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'item_name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'item_number': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'mc_amount1': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount2': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_amount3': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_currency': ('django.db.models.fields.CharField', [], {'default': "'USD'", 'max_length': '32', 'blank': 'True'}),
'mc_fee': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_gross': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_handling': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'mc_shipping': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '64', 'decimal_places': '2', 'blank': 'True'}),
'memo': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'mp_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'next_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'notify_version': ('django.db.models.fields.DecimalFi
|
bluemini/kuma
|
vendor/packages/translate/storage/test_dtd.py
|
Python
|
mpl-2.0
| 20,624
| 0.001746
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from pytest import mark
from translate.misc import wStringIO
from translate.storage import dtd, test_monolingual
def test_roundtrip_quoting():
    """Round-trip check: quotefordtd then unquotefromdtd must be lossless."""
    # NOTE(review): some literals below appear to have had XML entities
    # decoded by an extraction step (e.g. '&amp;' shown as '&'); verify
    # against the upstream translate repository before relying on them.
    specials = [
        'Fish & chips',
        'five < six',
        'six > five',
        'Use ',
        'Use &nbsp;A "solution"',
        "skop 'n bal",
        '"""',
        "'''",
        '\n',
        '\t',
        '\r',
        'Escape at end \\',
        '',
        '\\n',
        '\\t',
        '\\r',
        '\\"',
        '\r\n',
        '\\r\\n',
        '\\',
        "Completed %S",
        "&blockAttackSites;",
        " ",
        "&intro-point2-a;",
        "&basePBMenu.label;",
        #"Don't buy",
        #"Don't \"buy\"",
        "A \"thing\"",
        "<a href=\"http"
    ]
    for special in specials:
        quoted_special = dtd.quotefordtd(special)
        unquoted_special = dtd.unquotefromdtd(quoted_special)
        print("special: %r\nquoted: %r\nunquoted: %r\n" % (special,
                                                           quoted_special,
                                                           unquoted_special))
        assert special == unquoted_special
@mark.xfail(reason="Not Implemented")
def test_quotefordtd_unimplemented_cases():
    """Test unimplemented quoting DTD cases."""
    # Markup inside the value should ideally survive quoting unchanged.
    assert dtd.quotefordtd("Between <p> and </p>") == ('"Between <p> and'
                                                       ' </p>"')
def test_quotefordtd():
    """Test quoting DTD definitions"""
    # NOTE(review): several expected values below look entity-decoded by an
    # extraction step (e.g. '&quot;'/'&amp;' rendered literally), which makes
    # some lines syntactically suspect; verify against upstream translate.
    assert dtd.quotefordtd('') == '""'
    assert dtd.quotefordtd("") == '""'
    assert dtd.quotefordtd("Completed %S") == '"Completed %S"'
    assert dtd.quotefordtd("&blockAttackSites;") == '"&blockAttackSites;"'
    assert dtd.quotefordtd(" ") == '" "'
    assert dtd.quotefordtd("&intro-point2-a;") == '"&intro-point2-a;"'
    assert dtd.quotefordtd("&basePBMenu.label;") == '"&basePBMenu.label;"'
    # The ' character isn't escaped as ' since the " char isn't present.
    assert dtd.quotefordtd("Don't buy") == '"Don\'t buy"'
    # The ' character is escaped as ' because the " character is present.
    assert dtd.quotefordtd("Don't \"buy\"") == '"Don't "buy""'
    assert dtd.quotefordtd("A \"thing\"") == '"A "thing""'
    # The " character is not escaped when it indicates an attribute value.
    assert dtd.quotefordtd("<a href=\"http") == "'<a href=\"http'"
    # &
    assert dtd.quotefordtd("Color & Light") == '"Color & Light"'
    assert dtd.quotefordtd("Color & █") == '"Color & █"'
    assert dtd.quotefordtd("Color&Light &red;") == '"Color&Light &red;"'
    assert dtd.quotefordtd("Color & Light; Yes") == '"Color & Light; Yes"'
@mark.xfail(reason="Not Implemented")
def test_unquotefromdtd_unimplemented_cases():
    """Test unimplemented unquoting DTD cases."""
    # Inverse of the quoting case above: markup should survive unquoting.
    assert dtd.unquotefromdtd('"<p> and </p>"') == "<p> and </p>"
def test_unquotefromdtd():
    """Test unquoting DTD definitions"""
    # NOTE(review): several literals below look entity-decoded by an
    # extraction step (duplicated asserts, embedded raw quotes); verify
    # against the upstream translate repository before relying on them.
    # %
    assert dtd.unquotefromdtd('"Completed %S"') == "Completed %S"
    assert dtd.unquotefromdtd('"Completed %S"') == "Completed %S"
    assert dtd.unquotefromdtd('"Completed %S"') == "Completed %S"
    # &entity;
    assert dtd.unquotefromdtd('"Color&light █"') == "Color&light █"
    assert dtd.unquotefromdtd('"Color & Light; Red"') == "Color & Light; Red"
    assert dtd.unquotefromdtd('"&blockAttackSites;"') == "&blockAttackSites;"
    assert dtd.unquotefromdtd('"&intro-point2-a;"') == "&intro-point2-a;"
    assert dtd.unquotefromdtd('"&basePBMenu.label"') == "&basePBMenu.label"
    # &
    assert dtd.unquotefromdtd('"Color & Light"') == "Color & Light"
    assert dtd.unquotefromdtd('"Color & █"') == "Color & █"
    # nbsp
    assert dtd.unquotefromdtd('" "') == " "
    # '
    assert dtd.unquotefromdtd("'Don't buy'") == "Don't buy"
    # "
    assert dtd.unquotefromdtd("'Don't "buy"'") == 'Don\'t "buy"'
    assert dtd.unquotefromdtd('"A "thing""') == "A \"thing\""
    assert dtd.unquotefromdtd('"A "thing""') == "A \"thing\""
    assert dtd.unquotefromdtd("'<a href=\"http'") == "<a href=\"http"
    # other chars
    assert dtd.unquotefromdtd('"»"') == u"»"
def test_android_roundtrip_quoting():
    """Round-trip check for the Android-flavoured DTD quoting."""
    cases = [
        "don't",
        'the "thing"'
    ]
    for special in cases:
        quoted_special = dtd.quoteforandroid(special)
        unquoted_special = dtd.unquotefromandroid(quoted_special)
        print("special: %r\nquoted: %r\nunquoted: %r\n" % (special,
                                                           quoted_special,
                                                           unquoted_special))
        assert special == unquoted_special
def test_quoteforandroid():
    """Test quoting Android DTD definitions."""
    # Apostrophes become \u0027 and double quotes are backslash-escaped.
    assert dtd.quoteforandroid("don't") == r'"don\u0027t"'
    assert dtd.quoteforandroid('the "thing"') == r'"the \"thing\""'
def test_unquotefromandroid():
    """Test unquoting Android DTD definitions."""
    # NOTE(review): the first assert's escaping looks corrupted by an
    # extraction step; verify against the upstream translate repository.
    assert dtd.unquotefromandroid('"Don\\'t show"') == "Don't show"
    assert dtd.unquotefromandroid('"Don\\\'t show"') == "Don't show"
    assert dtd.unquotefromandroid('"Don\\u0027t show"') == "Don't show"
    assert dtd.unquotefromandroid('"A \\"thing\\""') == "A \"thing\""
def test_removeinvalidamp(recwarn):
    """tests the removeinvalidamps function"""
    def tester(actual, expected=None):
        # With no expected value, the input must pass through unchanged.
        if expected is None:
            expected = actual
        assert dtd.removeinvalidamps("test.name", actual) == expected
    # No errors
    tester("Valid &entity; included")
    tester("Valid &entity.name; included")
    tester("Valid Ӓ included")
    tester("Valid &entity_name;")
    # Errors that require & removal
    tester("This & is broken", "This amp is broken")
    tester("Mad & & &", "Mad amp &")
    # An invalid amp must also emit a UserWarning.
    dtd.removeinvalidamps("simple.warningtest", "Dimpled &Ring")
    assert recwarn.pop(UserWarning)
class TestDTDUnit(test_monolingual.TestMonolingualUnit):
    """Monolingual unit tests specialised for DTD units."""
    UnitClass = dtd.dtdunit
    def test_rich_get(self):
        # DTD has no rich-text representation; disable the inherited test.
        pass
    def test_rich_set(self):
        # DTD has no rich-text representation; disable the inherited test.
        pass
class TestDTD(test_monolingual.TestMonolingualStore):
StoreClass = dtd.dtdfile
    def dtdparse(self, dtdsource):
        """helper that parses dtd source without requiring files"""
        # Wrap the string in a file-like object so dtdfile can consume it.
        dummyfile = wStringIO.StringIO(dtdsource)
        dtdfile = dtd.dtdfile(dummyfile)
        return dtdfile
    def dtdregen(self, dtdsource):
        """helper that converts dtd source to dtdfile object and back"""
        return str(self.dtdparse(dtdsource))
    def test_simpleentity(self):
        """checks that a simple dtd entity definition is parsed correctly"""
        dtdsource = '<!ENTITY test.me "bananas for sale">\n'
        dtdfile = self.dtdparse(dtdsource)
        assert len(dtdfile.units) == 1
        dtdunit = dtdfile.units[0]
        # Entity name and quoted definition must both survive parsing.
        assert dtdunit.entity == "test.me"
        assert dtdunit.definition == '"bananas for sale"'
    def test_blanklines(self):
        """checks that blank lines don't break the parsing or regeneration"""
        dtdsource = '<!ENTITY test.me "bananas for sale">\n\n'
        dtdregen = self.dtdregen(dtdsource)
        # Round-trip must preserve the trailing blank line exactly.
        assert dtdsource == dtdregen
def test_simpleentity_source(se
|
jtyr/ansible
|
lib/ansible/modules/command.py
|
Python
|
gpl-3.0
| 13,711
| 0.003502
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: command
short_description: Execute commands on targets
version_added: historical
description:
- The C(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes.
- The command(s) will not be
processed through the shell, so variables like C($HOSTNAME) and operations
like C("*"), C("<"), C(">"), C("|"), C(";") and C("&") will not work.
Use the M(ansible.builtin.shell) module if you need these features.
- To create C(command) tasks that are easier to read than the ones using space-delimited
arguments, pass parameters
|
using the C(args) L(task keyword,../reference_appendices/playbooks_keywords.html#task)
or use C(cmd) parameter.
- Either a free form command or C(cmd) parameter is required, see the examples.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
options:
free_form:
description:
- The command module takes a free form strin
|
g as a command to run.
- There is no actual parameter named 'free form'.
cmd:
type: str
description:
- The command to run.
argv:
type: list
description:
- Passes the command as a list rather than a string.
- Use C(argv) to avoid quoting values that would otherwise be interpreted incorrectly (for example "user name").
- Only the string (free form) or the list (argv) form can be provided, not both. One or the other must be provided.
version_added: "2.6"
creates:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file already exists, this step B(won't) be run.
removes:
type: path
description:
- A filename or (since 2.0) glob pattern. If a matching file exists, this step B(will) be run.
version_added: "0.8"
chdir:
type: path
description:
- Change into this directory before running the command.
version_added: "0.6"
warn:
description:
- (deprecated) Enable or disable task warnings.
- This feature is deprecated and will be removed in 2.14.
- As of version 2.11, this option is now disabled by default.
type: bool
default: no
version_added: "1.8"
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: "2.4"
stdin_add_newline:
type: bool
default: yes
description:
- If set to C(yes), append a newline to stdin data.
version_added: "2.8"
strip_empty_ends:
description:
- Strip empty lines from the end of stdout/stderr in result.
version_added: "2.8"
type: bool
default: yes
notes:
- If you want to run a command through the shell (say you are using C(<), C(>), C(|), etc), you actually want the M(ansible.builtin.shell) module instead.
Parsing shell metacharacters can lead to unexpected commands being executed if quoting is not done correctly so it is more secure to
use the C(command) module when possible.
- " C(creates), C(removes), and C(chdir) can be specified after the command.
For instance, if you only want to run a command if a certain file does not exist, use this."
- Check mode is supported when passing C(creates) or C(removes). If running in check mode and either of these are specified, the module will
check for the existence of the file and report the correct changed status. If these are not supplied, the task will be skipped.
- The C(executable) parameter is removed since version 2.4. If you have a need for this parameter, use the M(ansible.builtin.shell) module instead.
- For Windows targets, use the M(ansible.windows.win_command) module instead.
- For rebooting systems, use the M(ansible.builtin.reboot) or M(ansible.windows.win_reboot) module.
seealso:
- module: ansible.builtin.raw
- module: ansible.builtin.script
- module: ansible.builtin.shell
- module: ansible.windows.win_command
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Return motd to registered var
command: cat /etc/motd
register: mymotd
# free-form (string) arguments, all arguments on one line
- name: Run command if /path/to/database does not exist (without 'args')
command: /usr/bin/make_database.sh db_user db_name creates=/path/to/database
# free-form (string) arguments, some arguments on separate lines with the 'args' keyword
# 'args' is a task keyword, passed at the same level as the module
- name: Run command if /path/to/database does not exist (with 'args' keyword)
command: /usr/bin/make_database.sh db_user db_name
args:
creates: /path/to/database
# 'cmd' is module parameter
- name: Run command if /path/to/database does not exist (with 'cmd' parameter)
command:
cmd: /usr/bin/make_database.sh db_user db_name
creates: /path/to/database
- name: Change the working directory to somedir/ and run the command as db_owner if /path/to/database does not exist
command: /usr/bin/make_database.sh db_user db_name
become: yes
become_user: db_owner
args:
chdir: somedir/
creates: /path/to/database
# argv (list) arguments, each argument on a separate line, 'args' keyword not necessary
# 'argv' is a parameter, indented one level from the module
- name: Use 'argv' to send a command as a list - leave 'command' empty
command:
argv:
- /usr/bin/make_database.sh
- Username with whitespace
- dbname with whitespace
creates: /path/to/database
- name: Safely use templated variable to run command. Always use the quote filter to avoid injection issues
command: cat {{ myfile|quote }}
register: myoutput
'''
RETURN = r'''
msg:
description: changed
returned: always
type: bool
sample: True
start:
description: The command execution start time
returned: always
type: str
sample: '2017-09-29 22:03:48.083128'
end:
description: The command execution end time
returned: always
type: str
sample: '2017-09-29 22:03:48.084657'
delta:
description: The command execution delta time
returned: always
type: str
sample: '0:00:00.001529'
stdout:
description: The command standard output
returned: always
type: str
sample: 'Clustering node rabbit@slave1 with rabbit@master …'
stderr:
description: The command standard error
returned: always
type: str
sample: 'ls cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: list
sample:
- echo
- hello
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master …']
stderr_lines:
description: The command standard error split in lines
returned: always
type: list
sample: [u'ls cannot access foo: No such file or directory', u'ls …']
'''
import datetime
import glob
import os
import shlex
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.common.collections import is_iterable
def check_command(module, commandline):
arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
'ln': 'state=link', 'mkdir': 'state=directory',
'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
commands = {'curl': 'get_url or uri', 'wget': 'get_url or uri',
'svn': 'subversion', 'service': 'service',
'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
'tar': 'unarchive', 'unzip': 'unarchive',
|
openstack/oslo.vmware
|
oslo_vmware/_i18n.py
|
Python
|
apache-2.0
| 852
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in
|
compliance with the License. You may obtain
# a copy of the License at
#
# http://www.
|
apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html
"""
import oslo_i18n

# Factory that produces translation functions bound to this project's
# gettext domain ('oslo_vmware').
_translators = oslo_i18n.TranslatorFactory(domain='oslo_vmware')

# The primary translation function using the well-known name "_"
_ = _translators.primary
|
msullivan/advent-of-code
|
2019/setup.py
|
Python
|
mit
| 164
| 0.04878
|
"""Build script for the C accelerator extension module ``_intcode``."""
from distutils.core import setup, Extension

setup(
    name='_intcode',
    version='0.1',
    ext_modules=[Extension('_intcode', sources=['_intcode.c'])],
)
|
apllicationCOM/youtube-dl-api-server
|
youtube_dl_server/youtube_dl/extractor/clipfish.py
|
Python
|
unlicense
| 1,624
| 0.000616
|
from __future__ import unicode_literals
import re
import time
import xml.etree.ElementTree
from .common import InfoExtractor
f
|
rom ..utils import (
ExtractorError,
parse_duration,
)
class ClipfishIE(InfoExtractor):
    """Extractor for videos hosted on clipfish.de."""

    IE_NAME = 'clipfish'
    _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'

    _TEST = {
        'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
        'md5': '2521cd644e862936cf2e698206e47385',
        'info_dict': {
            'id': '3966754',
            'ext': 'mp4',
            'title': 'FIFA 14 - E3 2013 Trailer',
            'duration': 82,
        },
        'skip': 'Blocked in the US'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)

        # The info endpoint expects a cache-busting timestamp parameter.
        info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                    (video_id, int(time.time())))
        doc = self._download_xml(
            info_url, video_id, note='Downloading info page')

        title = doc.find('title').text
        video_url = doc.find('filename').text
        if video_url is None:
            xml_bytes = xml.etree.ElementTree.tostring(doc)
            raise ExtractorError('Cannot find video URL in document %r' %
                                 xml_bytes)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': doc.find('imageurl').text,
            'duration': parse_duration(doc.find('duration').text),
        }
|
phelmig/django-fastbill
|
django_fastbill/migrations/0004_auto__add_field_customer_deleted.py
|
Python
|
mit
| 10,979
| 0.007469
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add the boolean 'deleted' column to Customer."""
    # Adding field 'Customer.deleted'
    db.add_column(u'django_fastbill_customer', 'deleted',
                  self.gf('django.db.models.fields.BooleanField')(default=False),
                  keep_default=False)
def backwards(self, orm):
    """Revert the migration: drop the 'deleted' column from Customer."""
    # Deleting field 'Customer.deleted'
    db.delete_column(u'django_fastbill_customer', 'deleted')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_fastbill.article': {
'Meta': {'object_name': 'Article'},
'allow_multiple': ('django.db.models.fields.BooleanField', [], {}),
'article_number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'checkout_url': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'description': ('django.db.models.fields.TextField', [], {}),
'is_addon': ('django.db.models.fields.BooleanField', [], {}),
'return_url_cancel': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'return_url_success': ('django.db.models.fields.URLField', [], {'max_length': '300'}),
'setup_fee': ('django.db.models.fields.FloatField', [], {}),
'subscription_cancellation': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_duration': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_duration_follow': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'subscription_interval': ('django.db.models.fields.CharField', [], {'max
|
_length': '50'}),
'subscription_number_events': ('django.db.models.fields.IntegerField', [], {}),
'subscription_trial': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'title': ('django.db.models.fie
|
lds.CharField', [], {'max_length': '500'}),
'unit_price': ('django.db.models.fields.FloatField', [], {}),
'vat_percent': ('django.db.models.fields.FloatField', [], {})
},
u'django_fastbill.customer': {
'Meta': {'object_name': 'Customer'},
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changedata_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'customer_ext_uid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'customer_number': ('django.db.models.fields.IntegerField', [], {}),
'dashboard_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'fastbill_customer'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
},
u'django_fastbill.invoice': {
'Meta': {'object_name': 'Invoice'},
'affiliate': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'cash_discount_days': ('django.db.models.fields.IntegerField', [], {}),
'cash_discount_percent': ('django.db.models.fields.FloatField', [], {}),
'changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'currency_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'customer_id': ('django.db.models.fields.IntegerField', [], {}),
'customer_number': ('django.db.models.fields.IntegerField', [], {}),
'days_for_payment': ('django.db.models.fields.IntegerField', [], {}),
'delivery_date': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'document_url': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
'due_date': ('django.db.models.fields.DateTimeField', [], {}),
'introtext': ('
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/gdata/test_data.py
|
Python
|
bsd-3-clause
| 87,056
| 0.001861
|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
TEST_BASE_ENTRY = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<app:control xmlns:app='http://purl.org/atom/app#'>
<app:draft>yes</app:draft>
<gm:disapproved xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
</app:control>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
BIG_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dive into mark</title>
<subtitle type="html">
A <em>lot</em> of effort
went into making this effortless
</subtitle>
<updated>2005-07-31T12:29:29Z</updated>
<id>tag:example.org,2003:3</id>
<link rel="alternate" type="text/html"
hreflang="en" href="http://example.org/"/>
<link rel="self" type="application/atom+xml"
href="http://example.org/feed.atom"/>
<rights>Copyright (c) 2003, Mark Pilgrim</rights>
<generator uri="http://www.example.com/" version="1.0">
Example Toolkit
</generator>
<entry>
<title>Atom draft-07 snapshot</title>
<link rel="alternate" type="text/html"
href="http://example.org/2005/04/02/atom"/>
<link rel="enclosure" type="audio/mpeg" length="1337"
href="http://example.org/audio/ph34r_my_podcast.mp3"/>
<id>tag:example.org,2003:3.2397</id>
<updated>2005-07-31T12:29:29Z</updated>
<published>2003-12-13T08:29:29-04:00</published>
<author>
<name>Mark Pilgrim</name>
<uri>http://example.org/</uri>
<email>f8dy@example.com</email>
</author>
<contributor>
<name>Sam Ruby</name>
</contributor>
<contributor>
<name>Joe Gregorio</name>
</contributor>
<content type="xhtml" xml:lang="en"
xml:base="http://diveintomark.org/">
<div xmlns="http://www.w3.org/1999/xhtml">
<p><i>[Update: The Atom draft is finished.]</i></p>
</div>
</content>
</entry>
</feed>
"""
SMALL_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Example Feed</title>
<link href="http://example.org/"/>
<updated>2003-12-13T18:30:02Z</updated>
<author>
<name>John Doe</name>
</author>
<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
<entry>
<title>Atom-Powered Robots Run Amok</title>
<link href="http://example.org/2003/12/13/atom03"/>
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
<updated>2003-12-13T18:30:02Z</updated>
<summary>Some text.</summary>
</entry>
</feed>
"""
GBASE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:g='http://base.google.com/ns/1.0' xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets</id>
<updated>2007-02-08T23:18:21.935Z</updated>
<title type='text'>Items matching query: digital camera</title>
<link rel='alternate' type='text/html' href='http://base.google.com'>
</link>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/b
|
ase/feeds/snippets?start-index=1&max-results=25&bq=digital+camera'>
</link>
<link rel='next' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=26&max-results=25&bq=digital+camera'>
</link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase </generator>
<openSearch:tot
|
alResults>2171885</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/snippets/13246453826751927533</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305668&is=REG&kw=DIDCB5092&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/13246453826751927533'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>anon-szot0wdsq0at@base.google.com</email>
</author>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:condition type='text'>new</g:condition>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:id type='text'>305668-REG</g:id>
<g:item_type type='text'>Products</g:item_type>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
<g:customer_id type='int'>1172711</g:customer_id>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:item_language type='text'>EN</g:item_language>
<g:manufacturer_id type='text'>DCB5092</g:manufacturer_id>
<g:target_country type='text'>US</g:target_country>
<g:weight type='float'>1.0</g:weight>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6</g:image_link>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/10145771037331858608</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera
|
ayoubg/gem5-graphics
|
Mesa-7.11.2_GPGPU-Sim/src/mapi/glapi/gen/gl_XML.py
|
Python
|
bsd-3-clause
| 24,796
| 0.038877
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import libxml2
import re, sys, string
import typeexpr
def parse_GL_API( file_name, factory = None ):
    """Parse an OpenGL API description XML file into an API object.

    Arguments:
    file_name -- path of the gl_API.xml (or extension) file to parse.
    factory   -- optional gl_item_factory (or subclass) used to create
                 the API item objects; a default factory is used when
                 omitted.

    Returns the populated API object.
    """
    # Combine the parser options with bitwise-or: these are bit flags,
    # and '|' (unlike '+') remains correct even if a flag were repeated.
    flags = (libxml2.XML_PARSE_XINCLUDE | libxml2.XML_PARSE_NOBLANKS
             | libxml2.XML_PARSE_DTDVALID | libxml2.XML_PARSE_DTDATTR
             | libxml2.XML_PARSE_DTDLOAD | libxml2.XML_PARSE_NOENT)
    doc = libxml2.readFile( file_name, None, flags )
    doc.xincludeProcess()

    if not factory:
        factory = gl_item_factory()

    api = factory.create_item( "api", None, None )
    api.process_element( doc )

    # After the XML has been processed, we need to go back and assign
    # dispatch offsets to the functions that request that their offsets
    # be assigned by the scripts.  Typically this means all functions
    # that are not part of the ABI.
    for func in api.functionIterateByCategory():
        if func.assign_offset:
            func.offset = api.next_offset
            api.next_offset += 1

    doc.freeDoc()

    return api
def is_attr_true( element, name ):
    """Read the boolean attribute *name* from *element*.

    Returns 1 when the attribute value is the string "true" and 0 when
    it is "false".  Any other value raises RuntimeError.
    """
    value = element.nsProp( name, None )
    if value == "true":
        return 1
    if value == "false":
        return 0
    raise RuntimeError('Invalid value "%s" for boolean "%s".' % (value, name))
class gl_print_base:
"""Base class of all API pretty-printers.
In the model-view-controller pattern, this is the view. Any derived
class will want to over-ride the printBody, printRealHader, and
printRealFooter methods. Some derived classes may want to over-ride
printHeader and printFooter, or even Print (though this is unlikely).
"""
def __init__(self):
# Name of the script that is generating the output file.
# Every derived class should set this to the name of its
# source file.
self.name = "a"
# License on the *generated* source file. This may differ
# from the license on the script that is generating the file.
# Every derived class should set this to some reasonable
# value.
#
# See license.py for an example of a reasonable value.
self.license = "The license for this file is unspecified."
# The header_tag is the name of the C preprocessor define
# used to prevent multiple inclusion. Typically only
# generated C header files need this to be set. Setting it
# causes code to be generated automatically in printHeader
# and printFooter.
self.header_tag = None
# List of file-private defines that must be undefined at the
# end of the file. This can be used in header files to define
# names for use in the file, then undefine them at the end of
# the header file.
self.undef_list = []
return
def Print(self, api):
self.printHeader()
self.printBody(api)
self.printFooter()
return
def printHeader(self):
"""Print the header associated with all files and call the printRealHeader method."""
print '/* DO NOT EDIT - This file generated automatically by %s script */' \
% (self.name)
print ''
print '/*'
print ' * ' + self.license.replace('\n', '\n * ')
print ' */'
print ''
if self.header_tag:
print '#if !defined( %s )' % (self.header_tag)
print '# define %s' % (self.header_tag)
print ''
self.printRealHeader();
return
def printFooter(self):
"""Print the header associated with all files and call the printRealFooter method."""
self.printRealFooter()
if self.undef_list:
print ''
for u in self.undef_list:
print "# undef %s" % (u)
if self.header_tag:
print ''
print '#endif /* !defined( %s ) */' % (self.header_tag)
def printRealHeader(self):
"""Print the "real" header for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printRealFooter(self):
"""Print the "real" footer for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printPure(self):
"""Conditionally define `PURE' function attribute.
Conditionally defines a
|
preprocessor macro `PURE' that wraps
GCC's `pure' function attribute. The conditional code can be
easilly adapted to other compilers that support a similar
feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("PURE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
#
|
define PURE __attribute__((pure))
# else
# define PURE
# endif"""
return
def printFastcall(self):
"""Conditionally define `FASTCALL' function attribute.
Conditionally defines a preprocessor macro `FASTCALL' that
wraps GCC's `fastcall' function attribute. The conditional
code can be easilly adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("FASTCALL")
print """# if defined(__i386__) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
# define FASTCALL __attribute__((fastcall))
# else
# define FASTCALL
# endif"""
return
def printVisibility(self, S, s):
"""Conditionally define visibility function attribute.
Conditionally defines a preprocessor macro name S that wraps
GCC's visibility function attribute. The visibility used is
the parameter s. The conditional code can be easilly adapted
to other compilers that support a similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append(S)
print """# if (defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) && defined(__ELF__))
# define %s __attribute__((visibility("%s")))
# else
# define %s
# endif""" % (S, s, S)
return
def printNoinline(self):
"""Conditionally define `NOINLINE' function attribute.
Conditionally defines a preprocessor macro `NOINLINE' that
wraps GCC's `noinline' function attribute. The conditional
code can be easilly adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("NOINLINE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define NOINLINE __attribute__((noinline))
# else
# define NOINLINE
# endif"""
return
def real_function_name(element):
    """Return the element's "alias" attribute when set, otherwise its "name"."""
    name = element.nsProp("name", None)
    alias = element.nsProp("alias", None)
    # A truthy alias takes precedence over the plain name.
    return alias if alias else name
def real_category_name(c):
    """Map a GL version string like "1.5" to "GL_VERSION_1_5".

    Any other category name (extension names etc.) is returned unchanged.
    """
    # A leading "<major>.<minor>" marks a core-version category.
    if re.match("[1-9][0-9]*[.][0-9]+", c):
        return "GL_VERSION_" + c.replace(".", "_")
    return c
def classify_category(name, number):
"""Based on the category name and number, select a numerical class for it.
Categories are divided into four classes numbered 0 through 3. The
classes are:
0. Co
|
ProjectBabbler/ebird-api
|
tests/validation/test_clean_provisional.py
|
Python
|
mit
| 483
| 0
|
import un
|
ittest
from ebird.api.validation import clean_provisional
class CleanProvisionalTests(unittest.TestCase):
"""Tests for the clean_provisional validation function."""
def test_converts_bool(self):
self.assertEqual("true", clean_provisional(True))
self.assertEqual("false", clean_provisional(False))
def test_converts_integer(self):
self.assertEqual("true", clean_provisional(1))
self.assertEqual("false", clean_provisional(0)
|
)
|
inteos/IBAdmin
|
system/apps.py
|
Python
|
agpl-3.0
| 128
| 0
|
from __future__ import unicode_literals
from django.apps import Ap
|
pConfig
class SystemConfig(AppConfig):
name
|
= 'system'
|
rspavel/spack
|
var/spack/repos/builtin/packages/glm/package.py
|
Python
|
lgpl-2.1
| 666
| 0.001502
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Glm(CMakePackage):
"""OpenGL Mathematics (GLM) is a header only C++ mathematics library for
graphics s
|
oftware based on the OpenGL Shading Language (GLSL) specification
|
"""
homepage = "https://github.com/g-truc/glm"
url = "https://github.com/g-truc/glm/archive/0.9.7.1.tar.gz"
version('0.9.7.1', sha256='285a0dc8f762b4e523c8710fbd97accaace0c61f45bc8be2bdb0deed07b0e6f3')
depends_on('cmake@2.6:', type='build')
|
strets123/pyms
|
Utils/Math.py
|
Python
|
gpl-2.0
| 5,643
| 0.010987
|
"""
Provides mathematical functions
"""
#############################################################################
# #
# PyMS software for processing of metabolomic mass-spectrometry data #
# Copyright (C) 2005-2012 Vladimir Likic #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #
# #
#############################################################################
import copy, math
from pyms.Utils.Error import error
from pyms.Utils.Utils import is_list, is_number
def median(v):
    """
    @summary: Returns a median of a list or numpy array

    @param v: Input list or array
    @type v: ListType or numpy.core.ndarray

    @return: The median of the input list
    @rtype: FloatType

    @author: Vladimir Likic
    """
    if not is_list(v):
        error("argument neither list nor array")

    # Work on a copy so the caller's sequence is not reordered.
    local_data = copy.deepcopy(v)
    local_data.sort()
    N = len(local_data)

    if (N % 2) == 0:
        # Even number of points: average the two middle values.
        # '//' keeps the index integral on both Python 2 and 3
        # (plain '/' yields a float index, and an error, on Python 3).
        K = N // 2 - 1
        median = (local_data[K] + local_data[K + 1]) / 2.0
    else:
        # Odd number of points: take the single middle value.
        K = (N - 1) // 2 - 1
        median = local_data[K + 1]

    return median
def vector_by_step(vstart,vstop,vstep):
"""
@summary: generates a list by using start, stop, and step values
@param vstart: Initial value
@type vstart: A number
@param vstop: Max value
@type vstop: A number
@param vstep: Step
@type vstep: A number
@return: A list generated
@rtype: ListType
@author: Vladimir Likic
|
"""
if not is_number(vstart) or not is_number(vstop) or not is_number(vstep):
error("parameters start, stop, step must be numbers")
v = []
p = vstart
while
|
p < vstop:
v.append(p)
p = p + vstep
return v
def MAD(v):
    """
    @summary: median absolute deviation

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: median absolute deviation
    @rtype: FloatType

    @author: Vladimir Likic
    """
    if not is_list(v):
        error("argument neither list nor array")

    center = median(v)
    # Absolute deviations of each point from the median, in input order.
    deviations = [math.fabs(value - center) for value in v]
    # 0.6745 rescales the MAD into a consistent estimator of the
    # standard deviation for normally distributed data.
    return median(deviations) / 0.6745
def amin(v):
    """
    @summary: Finds the minimum element in a list or array

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Tuple (mini, minv), where minv is the minimum
        element in the list and mini is its index

    @rtype: TupleType

    @author: Vladimir Likic
    """
    if not is_list(v):
        error("argument neither list nor array")

    if len(v) == 0:
        error("cannot find minimum of an empty sequence")

    # Single left-to-right scan keeping the first occurrence of the
    # smallest value.  (The previous implementation initialized from
    # max(v) and only updated on a strict '<', so it raised
    # "finding maximum failed" for single-element or all-equal input.)
    mini = 0
    minv = v[0]
    for ii in range(1, len(v)):
        if v[ii] < minv:
            minv = v[ii]
            mini = ii
    return mini, minv
def mean(v):
    """
    @summary: Calculates the mean

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Mean
    @rtype: FloatType

    @author: Vladimir Likic
    """
    if not is_list(v):
        error("argument neither list nor array")

    # sum() with a float start value reproduces the original
    # left-to-right accumulation beginning at 0.0.
    return sum(v, 0.0) / float(len(v))
def std(v):
    """
    @summary: Calculates standard deviation

    @param v: A list or array
    @type v: ListType, TupleType, or numpy.core.ndarray

    @return: Sample standard deviation
    @rtype: FloatType

    @author: Vladimir Likic
    """
    if not is_list(v):
        error("argument neither list nor array")

    v_mean = mean(v)
    # Sample variance: squared deviations divided by (n - 1).
    squared = sum((e - v_mean) ** 2 for e in v)
    return math.sqrt(squared / float(len(v) - 1))
def rmsd(list1, list2):
    """
    @summary: Calculates RMSD for the 2 lists

    @param list1: First data set
    @type list1: ListType, TupleType, or numpy.core.ndarray
    @param list2: Second data set
    @type list2: ListType, TupleType, or numpy.core.ndarray
    @return: RMSD value
    @rtype: FloatType

    @author: Qiao Wang
    @author: Andrew Isaac
    @author: Vladimir Likic
    """
    if not is_list(list1):
        error("argument neither list nor array")
    if not is_list(list2):
        error("argument neither list nor array")

    # 'total' instead of 'sum' so the builtin sum() is not shadowed.
    # list2 is indexed by list1's range, as before; a shorter list2
    # still raises IndexError.
    total = 0.0
    for i in range(len(list1)):
        total = total + (list1[i] - list2[i]) ** 2
    return math.sqrt(total / len(list1))
|
igudym/twango
|
twango/template/default/src/conf/h_third_party_apps.py
|
Python
|
bsd-3-clause
| 115
| 0.026087
|
try:
INSTALLED_APPS
except Name
|
Error:
INSTALLED_APPS=()
#Generated Config - Don't modify ab
|
ove this line
|
phenoxim/nova
|
nova/api/openstack/compute/views/images.py
|
Python
|
apache-2.0
| 6,081
| 0.000493
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from nova.api.openstack import common
from nova.image import glance
from nova import utils
class ViewBuilder(common.ViewBuilder):
_collection_name = "images"
def basic(self, request, image):
    """Return a dictionary with basic image attributes.

    Only id, name, and self/bookmark/alternate links are included;
    see show() for the detailed view.
    """
    return {
        "image": {
            "id": image.get("id"),
            "name": image.get("name"),
            # Links under the "images" collection.
            "links": self._get_links(request,
                                     image["id"],
                                     self._collection_name),
        },
    }
def show(self, request, image):
    """Return a dictionary with image details."""
    image_dict = {
        "id": image.get("id"),
        "name": image.get("name"),
        # Coerce to int; missing/empty values become 0.
        "minRam": int(image.get("min_ram") or 0),
        "minDisk": int(image.get("min_disk") or 0),
        "metadata": image.get("properties", {}),
        "created": self._format_date(image.get("created_at")),
        "updated": self._format_date(image.get("updated_at")),
        "status": self._get_status(image),
        "progress": self._get_progress(image),
        "links": self._get_links(request,
                                 image["id"],
                                 self._collection_name),
    }

    # Images created from a server carry the source server's UUID in
    # their properties; expose it with self and bookmark links.
    instance_uuid = image.get("properties", {}).get("instance_uuid")

    if instance_uuid is not None:
        server_ref = self._get_href_link(request, instance_uuid, 'servers')

        image_dict["server"] = {
            "id": instance_uuid,
            "links": [{
                "rel": "self",
                "href": server_ref,
            },
            {
                "rel": "bookmark",
                "href": self._get_bookmark_link(request,
                                                instance_uuid,
                                                'servers'),
            }],
        }

    # Surface the auto_disk_config image property in the API's
    # OS-DCF:diskConfig form (AUTO/MANUAL).
    auto_disk_config = image_dict['metadata'].get("auto_disk_config", None)
    if auto_disk_config is not None:
        value = strutils.bool_from_string(auto_disk_config)
        image_dict["OS-DCF:diskConfig"] = (
            'AUTO' if value else 'MANUAL')

    return dict(image=image_dict)
def detail(self, request, images):
    """Show a list of images with details."""
    # Per-item formatting delegated to show(); pagination links are
    # generated under the "images/detail" collection path.
    list_func = self.show
    coll_name = self._collection_name + '/detail'
    return self._list_view(list_func, request, images, coll_name)
def index(self, request, images):
    """Show a list of images with basic attributes."""
    # Per-item formatting delegated to basic(); pagination links are
    # generated under the plain "images" collection path.
    list_func = self.basic
    coll_name = self._collection_name
    return self._list_view(list_func, request, images, coll_name)
def _list_view(self, list_func, request, images, coll_name):
"""Provide a view for a list of images.
:param list_func: Function used to format the image data
:param request: API request
:param images: List of images in dictionary format
:param coll_name: Name of collection,
|
used to generate the next link
for a pagination query
:returns: Image reply data in dictionary format
"""
image_list = [list_func(request, image)["image"] for image in images]
images_links = self._get_collection_links(request, images, coll_name)
images_dict = dict(images=image_list)
if images_links:
images_dict["ima
|
ges_links"] = images_links
return images_dict
def _get_links(self, request, identifier, collection_name):
    """Return a list of links for this image.

    Produces self, bookmark, and alternate (glance) link dicts.
    """
    return [{
        "rel": "self",
        "href": self._get_href_link(request, identifier, collection_name),
    },
    {
        "rel": "bookmark",
        "href": self._get_bookmark_link(request,
                                        identifier,
                                        collection_name),
    },
    {
        # Direct link to the image in the glance image service.
        "rel": "alternate",
        "type": "application/vnd.openstack.image",
        "href": self._get_alternate_link(request, identifier),
    }]
def _get_alternate_link(self, request, identifier):
    """Create an alternate link for a specific image id.

    Builds "<glance_url>/images/<id>" using the glance endpoint for
    the request's context.
    """
    glance_url = glance.generate_glance_url(
        request.environ['nova.context'])
    glance_url = self._update_glance_link_prefix(glance_url)
    return '/'.join([glance_url,
                     self._collection_name,
                     str(identifier)])
@staticmethod
def _format_date(dt):
    """Return standard format for a given datetime object."""
    # Implicitly returns None when dt is None (field unset).
    if dt is not None:
        return utils.isotime(dt)
@staticmethod
def _get_status(image):
    """Update the status field to standardize format."""
    # Map glance image states to the API's uppercase vocabulary;
    # anything unrecognized is reported as UNKNOWN.
    return {
        'active': 'ACTIVE',
        'queued': 'SAVING',
        'saving': 'SAVING',
        'deleted': 'DELETED',
        'pending_delete': 'DELETED',
        'killed': 'ERROR',
    }.get(image.get("status"), 'UNKNOWN')
@staticmethod
def _get_progress(image):
    """Return a coarse progress percentage derived from the image status."""
    # Unknown or terminal-error states report 0.
    return {
        "queued": 25,
        "saving": 50,
        "active": 100,
    }.get(image.get("status"), 0)
|
david672orford/pykarta
|
pykarta/geocoder/massgis.py
|
Python
|
gpl-2.0
| 3,478
| 0.02674
|
# pykarta/geocoder/massgis.py
# Copyright 2013--2019, Trinity College Computing Center
# Last modified: 22 October 2019
from __future__ import print_function
import lxml.etree as ET
from .geocoder_base import GeocoderBase, GeocoderResult, GeocoderError
import pykarta.address
# https://wiki.state.ma.us/confluence/pages/viewpage.action?pageId=451772508
class GeocoderMassGIS(GeocoderBase):
url_server = "gisprpxy.itd.state.ma.us"
url_path = "/MassGISCustomGeocodeLatLongApplication/MassGISCustomGeocodeService.asmx"
delay = 1.0 # no more than one request per second
def FindAddr(self, address, countrycode=None):
    """Geocode *address* and return a GeocoderResult tagged "MassGIS".

    Only attempts a lookup when the address's state is one the MassGIS
    service covers; otherwise the result is returned unpopulated.
    countrycode is accepted for interface compatibility with the other
    geocoders but is unused here.
    """
    result = GeocoderResult(address, "MassGIS")
    if address[self.f_state] in ("MA", "CT", "NY", "NH", "VT"):  # covers these states in whole or in part
        self.FindAddr2(address, result)
    if result.coordinates is None:
        self.debug(" No match")
    return result
def FindAddr2(self, address, result):
query = ET.Element("{http://schemas.xmlsoap.org/soap/envelope/}Envelope",
# This is an LXML feature
nsmap={
"soap":"http://schemas.xmlsoap.org/soap/envelope/",
"xsi":"http://www.w3.org/2001/XMLSchema-instance",
"xsd":"http://www.w3.org/2001/XMLSchema",
}
)
query_body = ET.Element("{http://schemas.xmlsoap.org/soap/envelope/}Body")
query.append(query_body)
query_address = ET.Element("GeocodeAddress", nsmap={None:"http://tempuri.org/"})
query_body.append(query_address)
query_term = ET.Element("Address")
abbr_street = pykarta.address.abbreviate_street(address[self.f_street])
query_term.text = "%s %s" % (address[self.f_house_number], abbr_street)
query_address.append(query_term)
query_term = ET.Element("City")
query_term.text = address[self.f_city]
query_address.append(query_term)
query_term = ET.Element("State")
query_term.text = address[self.f_state]
query_address.append(query_term)
if address[self.f_postal_code] != "":
query_term = ET.Element("ZipCode")
query_term.text = address[self.f_postal_code]
query_address.append(query_term)
# xml_declaration and pretty_print require LXML
query_text = ET.tostring(ET.ElementTree(element=query), encoding="utf-8", xml_declaration=True, pretty_print=True)
#print(query_text)
resp_text = self.get(self.url_path, query=query_text, method="POST", content_type="text/xml")
#print(resp_text)
try:
tree = ET.XML(resp_text)
except:
self.debug(" Invalid response")
return result
self.debug_indented(ET.tostring(tree, encoding="utf-8", pretty_print=True))
match = tree.find(".//{http://tempuri.org/}GeocodeAddressResult")
score = match.find("{http://tempuri.org/}Score")
if score is not None:
score = score.text
matched_address = match.find("{http://tempuri.org/}MatchedAddress").text
lat = float(match.find("{http://tempuri.org/}Lat
|
").text)
lon = float(match.find("{http://tempuri.org/}Long").text)
#print(score, lat, lon)
if score == "100" and matched_address.startswith("%s %s," % (address[self.f_house_number], abbr_street.upper())):
result.coordinates = (lat, lon)
result.precision = "INTERPOLATED"
e
|
lse:
result.alternative_addresses.append(matched_address)
if __name__ == "__main__":
gc = GeocoderMassGIS()
gc.debug_enabled = True
print(gc.FindAddr(["457","Union Street","","West Springfield","MA",""]))
#print(gc.FindAddr(["10","Improbable Street","","Westfield","MA","01085"]))
#print gc.FindAddr(["32","Park Avenue Court","","West Springfield","MA",""])
|
kernsuite-debian/lofar
|
CEP/Calibration/ExpIon/src/__init__.py
|
Python
|
gpl-3.0
| 1,065
| 0.001878
|
# -*- coding: iso-8859-1 -*-
# __init__.py: Top level .py file for python solution analysis tools.
#
# Copyright (C) 2010
# ASTRON (
|
Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the
|
terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id: __init__.py 12729 2010-03-24 13:39:59Z vdtol $
#from ionosphere import *
__all__ = ['ionosphere', 'parmdbmain']
|
jag1g13/lammps
|
lib/gpu/Install.py
|
Python
|
gpl-2.0
| 5,201
| 0.013459
|
#!/usr/bin/env python
# Install.py tool to build the GPU library
# used to automate the steps described in the README file in this dir
from __future__ import print_function
import sys,os,subprocess
# help message
help = """
Syntax from src dir: make lib-gpu args="-m machine -h hdir -a arch -p precision -e esuffix -m -o osuffix"
Syntax from lib dir: python Install.py -m machine -h hdir -a arch -p precision -e esuffix -m -o osuffix
specify one or more options, order does not matter
copies an existing Makefile.machine in lib/gpu to Makefile.auto
optionally edits these variables in Makefile.auto:
CUDA_HOME, CUDA_ARCH, CUDA_PRECISION, EXTRAMAKE
optionally uses Makefile.auto to build the GPU library -> libgpu.a
and to copy a Makefile.lammps.esuffix -> Makefile.lammps
optionally copies Makefile.auto to a new Makefile.osuffix
-m = use Makefile.machine as starting point, copy to Makefile.auto
default machine = linux
-h = set CUDA_HOME variable in Makefile.auto to hdir
hdir = path to NVIDIA Cuda software, e.g. /usr/local/cuda
-a = set CUDA_ARCH variable in Makefile.auto to arch
use arch = 20 for Tesla C2050/C2070 (Fermi) (deprecated as of CUDA 8.0)
or GeForce GTX 580 or similar
use arch = 30 for Tesla K10 (Kepler)
use arch = 35 for Tesla K40 (Kepler) or GeForce GTX Titan or similar
use arch = 37 for Tesla dual K80 (Kepler)
use arch = 60 for Tesla P100 (Pascal)
-p = set CUDA_PRECISION variable in Makefile.auto to precision
use precision = double or mixed or single
-e = set EXTRAMAKE variable in Makefile.auto to Makefile.lammps.esuffix
-b = make the GPU library using Makefile.auto
first performs a "make clean"
then produces libgpu.a if successful
also copies EXTRAMAKE file -> Makefile.lammps
-e can set which Makefile.lammps.esuffix file is copied
-o = copy final Makefile.auto to Makefile.osuffix
Examples:
make lib-gpu args="-b" # build GPU lib with default Makefile.linux
make lib-gpu args="-m xk7 -p single -o xk7.single" # create new Makefile.xk7.single, altered for single-precision
make lib-gpu args="-m mpi -a 35 -p single -o mpi.mixed -b" # create new Makefile.mpi.mixed, also build GPU lib with these settings
"""
# print error message or help
def error(str=None):
    """Report a problem and terminate the script.

    With a message, prints "ERROR <message>"; with no (or an empty)
    message, prints the module-level usage text in `help`.
    Always exits via sys.exit().
    """
    if str:
        print("ERROR", str)
    else:
        print(help)
    sys.exit()
# parse args
args = sys.argv[1:]
nargs = len(args)
if nargs == 0: error()
isuffix = "linux"
hflag = aflag = pflag = eflag = 0
makeflag = 0
outflag = 0
iarg = 0
while iarg < nargs:
if args[iarg] == "-m":
if iarg+2 > nargs: error()
isuffix = args[iarg+1]
iarg += 2
elif args[iarg] == "-h":
if iarg+2 > nargs: error()
hflag = 1
hdir = args[iarg+1]
iarg += 2
elif args[iarg] == "-a":
if iarg+2 > nargs: error()
aflag = 1
arch = args[iarg+1]
iarg += 2
elif args[iarg] == "-p":
if iarg+2 > nargs: error()
pflag = 1
precision = args[iarg+1]
iarg += 2
elif args[iarg] == "-e":
if iarg+2 > nargs: error()
eflag = 1
lmpsuffix = args[iarg+1]
iarg += 2
elif args[iarg] == "-b":
makeflag = 1
iarg += 1
elif args[iarg] == "-o":
if iarg+2 > nargs: error()
outflag = 1
osuffix = args[iarg+1]
iarg += 2
else: error()
if pflag:
if precision == "double": precstr = "-D_DOUBLE_DOUBLE"
elif precision == "mixed": precstr = "-D_SINGLE_DOUBLE"
elif precision == "single": precstr = "-D_SINGLE_SINGLE"
else: error("Invalid precision se
|
tting")
# create Makefile.auto
# reset EXTRAMAKE, CUDA_HOME, CUDA_ARCH, CUDA_PRECISION if requested
if not os.path.exists("Makefile.%s" % isuffix):
error("lib/gpu/Makefile.%s does not exist" % isuffix)
lines = open("Makefile.%s" % isuffix,'r').readlines()
f
|
p = open("Makefile.auto",'w')
for line in lines:
words = line.split()
if len(words) != 3:
fp.write(line)
continue
if hflag and words[0] == "CUDA_HOME" and words[1] == '=':
line = line.replace(words[2],hdir)
if aflag and words[0] == "CUDA_ARCH" and words[1] == '=':
line = line.replace(words[2],"-arch=sm_%s" % arch)
if pflag and words[0] == "CUDA_PRECISION" and words[1] == '=':
line = line.replace(words[2],precstr)
if eflag and words[0] == "EXTRAMAKE" and words[1] == '=':
line = line.replace(words[2],"Makefile.lammps.%s" % lmpsuffix)
fp.write(line)
fp.close()
# perform make
# make operations copies EXTRAMAKE file to Makefile.lammps
if makeflag:
print("Building libgpu.a ...")
cmd = "rm -f libgpu.a"
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
cmd = "make -f Makefile.auto clean; make -f Makefile.auto"
txt = subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
print(txt.decode('UTF-8'))
if not os.path.exists("libgpu.a"):
error("Build of lib/gpu/libgpu.a was NOT successful")
if not os.path.exists("Makefile.lammps"):
error("lib/gpu/Makefile.lammps was NOT created")
# copy new Makefile.auto to Makefile.osuffix
if outflag:
print("Creating new Makefile.%s" % osuffix)
cmd = "cp Makefile.auto Makefile.%s" % osuffix
subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True)
|
Erotemic/plottool
|
plottool_ibeis/interact_annotations.py
|
Python
|
apache-2.0
| 52,275
| 0.001607
|
"""
Interactive tool to draw mask on an image or image-like array.
TODO:
* need concept of subannotation
* need to take options on a right click of an annotation
* add support for arbitrary polygons back in .
* rename species_list to label_list or category_list
* Just use metadata instead of species / category / label
# Need to incorporate parts into metadata
Notes:
3. Change bounding box and update continuously to the original image the
new ANNOTATIONs
2. Make new window and frames inside, double click to pull up normal window
with editing start with just taking in 6 images and ANNOTATIONs
1. ANNOTATION ID number, then list of 4 tuples
python -m utool.util_inspect check_module_usage --pat="interact_annotations.py"
References:
Adapted from matplotlib/examples/event_handling/poly_editor.py
Jan 9 2014: taken from: https://gist.github.com/tonysyu/3090704
CommandLine:
python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
"""
from __future__ import absolute_import, division, print_function
import six
import re
import numpy as np
try:
import vtool_ibeis as vt
except ImportError:
pass
import utool as ut
import itertools as it
import matplotlib as mpl
from six.moves import zip, range
from plottool_ibeis import draw_func2 as df2
from plottool_ibeis import abstract_interaction
print, rrr, profile = ut.inject2(__name__)
DEFAULT_SPECIES_TAG = '____'
# FIXE THESE TO BE GENERIC
ACCEPT_SAVE_HOTKEY = None # 'ctrl+a'
ADD_RECTANGLE_HOTKEY = 'ctrl+a' # 'ctrl+d'
ADD_RECTANGLE_FULL_HOTKEY = 'ctrl+f'
DEL_RECTANGLE_HOTKEY = 'ctrl+d' # 'ctrl+r'
TOGGLE_LABEL_HOTKEY = 'ctrl+t'
HACK_OFF_SPECIES_TYPING = True
if HACK_OFF_SPECIES_TYPING:
ADD_RECTANGLE_HOTKEY = 'a' # 'ctrl+d'
ADD_RECTANGLE_FULL_HOTKEY = 'f'
DEL_RECTANGLE_HOTKEY = 'd' # 'ctrl+r'
TOGGLE_LABEL_HOTKEY = 't'
NEXT_IMAGE_HOTKEYS = ['right', 'pagedown']
PREV_IMAGE_HOTKEYS = ['left', 'pageup']
TAU = np.pi * 2
class AnnotPoly(mpl.patches.Polygon, ut.NiceRepr):
"""
Helper to represent an annotation polygon
ibeis --aidcmd='Interact image' --aid=1
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> verts = vt.verts_from_bbox([0, 0, 10, 10])
>>> poly = AnnotPoly(None, 0, verts, 0, '____')
"""
def __init__(poly, ax, num, verts, theta, species, fc=(0, 0, 0),
line_color=(1, 1, 1), line_width=4, is_orig=False,
|
metadata=None, valid_species=None, manager=None):
super(AnnotPoly, poly).__init__(verts, animated=True, fc=fc, ec='none',
|
alpha=0)
poly.manager = manager
# Ensure basecoords consistency
poly.basecoords = vt.verts_from_bbox(vt.bbox_from_verts(poly.xy))
#poly.basecoords = poly.xy
poly.num = num
poly.is_orig = is_orig
poly.theta = theta
poly.metadata = metadata
poly.valid_species = valid_species
poly.tab_list = valid_species
# put in previous text and tabcomplete list for autocompletion
poly.tctext = ''
poly.tcindex = 0
poly.anchor_idx = 2
poly.child_polys = {}
# Display stuff that should be removed from constructor
poly.xy = calc_display_coords(poly.basecoords, poly.theta)
poly.lines = poly._make_lines(line_color, line_width)
poly.handle = poly._make_handle_line()
poly.species = species
if ax is not None:
poly.axes_init(ax)
def axes_init(poly, ax):
species = poly.species
metadata = poly.metadata
if isinstance(metadata, ut.LazyDict):
metadata_ = ut.dict_subset(metadata, metadata.cached_keys())
else:
metadata_ = metadata
poly.species_tag = ax.text(
#tagpos[0], tagpos[1],
0, 0,
species,
bbox={'facecolor': 'white', 'alpha': .8},
verticalalignment='top',
)
poly.metadata_tag = ax.text(
0, 0,
#tagpos[0] + 5, tagpos[1] + 80,
ut.repr3(metadata_, nobr=True),
bbox={'facecolor': 'white', 'alpha': .7},
verticalalignment='top',
)
# ???
poly.species_tag.remove() # eliminate "leftover" copies
poly.metadata_tag.remove()
#
poly.update_display_coords()
def move_to_back(poly):
# FIXME: doesnt work exactly
# Probalby need to do in the context of other polys
zorder = 0
poly.set_zorder(zorder)
poly.lines.set_zorder(zorder)
poly.handle.set_zorder(zorder)
def __nice__(poly):
return '(num=%r)' % (poly.num)
def add_to_axis(poly, ax):
ax.add_patch(poly)
ax.add_line(poly.lines)
ax.add_line(poly.handle)
def remove_from_axis(poly, ax):
poly.remove()
poly.lines.remove()
poly.handle.remove()
def draw_self(poly, ax, show_species_tags=False, editable=True):
ax.draw_artist(poly)
if not editable and poly.lines.get_marker():
poly.lines.set_marker('')
elif editable and not poly.lines.get_marker():
poly.lines.set_marker('o')
ax.draw_artist(poly.lines)
if editable:
ax.draw_artist(poly.handle)
if editable and show_species_tags:
# Hack to fix matplotlib 1.5 bug
poly.species_tag.figure = ax.figure
poly.metadata_tag.figure = ax.figure
ax.draw_artist(poly.species_tag)
ax.draw_artist(poly.metadata_tag)
def _make_lines(poly, line_color, line_width):
""" verts - list of (x, y) tuples """
_xs, _ys = list(zip(*poly.xy))
color = np.array(line_color)
marker_face_color = line_color
line_kwargs = {'lw': line_width, 'color': color,
'mfc': marker_face_color}
lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
**line_kwargs)
return lines
def _make_handle_line(poly):
_xs, _ys = list(zip(*poly.calc_handle_display_coords()))
line_width = 4
line_color = (0, 1, 0)
color = np.array(line_color)
marker_face_color = line_color
line_kwargs = {'lw': line_width, 'color': color, 'mfc': marker_face_color}
lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
**line_kwargs)
return lines
def calc_tag_position(poly):
r"""
CommandLine:
python -m plottool_ibeis.interact_annotations --test-calc_tag_position --show
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> poly = ut.DynStruct()
>>> poly.basecoords = vt.verts_from_bbox([0, 0, 400, 400], True)
>>> poly.theta = 0
>>> poly.xy = vt.verts_from_bbox([0, 0, 400, 400], True)
>>> tagpos = poly.calc_tag_position()
>>> print('tagpos = %r' % (tagpos,))
"""
points = [[
max(list(zip(*poly.basecoords))[0]),
min(list(zip(*poly.basecoords))[1])
]]
tagpos = rotate_points_around(points, poly.theta, *points_center(poly.xy))[0]
return tagpos
def calc_handle_display_coords(poly):
img_h = poly.manager.img.shape[0]
handle_length = img_h // 32
#MIN_HANDLE_LENGTH = 25
#handle_length = MIN_HANDLE_LENGTH
#handle_length = max(MIN_HANDLE_LENGTH, (h / 4))
cx, cy = points_center(poly.xy)
w, h = vt.get_pointset_extent_wh(np.array(poly.basecoords))
x0, y0 = cx, (cy - (h / 2)) # start at top edge
x1, y1 = (x0, y0 - handle_length)
pts = [(x0, y0), (x1, y1)]
pts = rotate_points_around(pts, poly.theta, cx, cy)
return pts
def update_color(poly, selected=False, editing_parts=False):
if editing_parts:
|
TedaLIEz/sentry
|
tests/sentry/api/endpoints/test_project_tagkey_values.py
|
Python
|
bsd-3-clause
| 862
| 0.00116
|
from
|
__future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import TagKey, TagValue
from sentry.testutils import APITestCase
class ProjectTagKeyValuesTest(APITestCase):
def test_simple(self):
project = self.create_project()
tagkey = TagKey.objects.create(project=project, key='foo')
tagvalue = TagValue.obje
|
cts.create(project=project, key='foo', value='bar')
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-tagkey-values', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
'key': tagkey.key,
})
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['value'] == 'bar'
|
CalthorpeAnalytics/urbanfootprint
|
footprint/main/models/analysis_module/agriculture_module/agriculture_updater_tool.py
|
Python
|
gpl-3.0
| 9,307
| 0.002579
|
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
import logging
import datetime
from django.utils.timezone import utc
from footprint.main.managers.geo_inheritance_manager import GeoInheritanceManager
from footprint.main.models.analysis_module.analysis_tool import AnalysisTool
from footprint.main.models.config.scenario import BaseScenario, FutureScenario
from footprint.main.models.geospatial.db_entity_keys import DbEntityKey
from footprint.main.models.presentation.layer.layer import Layer
from footprint.main.utils.utils import timestamp
from footprint.utils.websockets import send_message_to_client
from tilestache_uf.utils import invalidate_feature_cache
logger = logging.getLogger(__name__)
__author__ = 'calthorpe_analytics'
class AgricultureUpdaterTool(AnalysisTool):
objects = GeoInheritanceManager()
class Meta(object):
app_label = 'main'
abstract = False
def test_agriculture_core(self, **kwargs):
    # Thin wrapper: forwards kwargs to agriculture_analysis
    # (not defined in this class; presumably provided elsewhere —
    # NOTE(review): confirm where agriculture_analysis lives).
    self.agriculture_analysis(**kwargs)
ANALYSIS_FIELDS = ["gross_net_pct",
"built_form_key",
"built_form_id",
"density_pct",
"acres_gross",
"crop_yield",
"market_value",
"production_cost",
"water_consumption",
"labor_force",
"truck_trips"]
def progress(self, proportion, **kwargs):
    """Push an analysis-progress update to the requesting user's client.

    Sends a 'postSavePublisherProportionCompleted' websocket event
    carrying the job hash, config entity id, analysis module identity,
    and the completed *proportion* (a fraction, e.g. 0.05, not a percent).
    Requires 'user', 'job', and 'analysis_module' entries in **kwargs.
    """
    send_message_to_client(
        kwargs['user'].id,
        dict(
            event='postSavePublisherProportionCompleted',
            job_id=str(kwargs['job'].hashid),
            config_entity_id=self.config_entity.id,
            ids=[kwargs['analysis_module'].id],
            class_name='AnalysisModule',
            key=kwargs['analysis_module'].key,
            proportion=proportion))
def update_dependent_scenarios(self, base_features, scenario):
    """Propagate base-scenario agriculture results to future scenarios.

    Only acts when *scenario* is a BaseScenario: each FutureScenario
    sharing its parent config entity has its matching (same id) future
    agriculture features overwritten with the base feature's analysis
    fields — except features a user has already edited (updater set).
    Tile caches are invalidated for features whose built form changed.
    """
    if isinstance(scenario, BaseScenario):
        future_scenarios = FutureScenario.objects.filter(parent_config_entity=scenario.parent_config_entity_subclassed)
        logger.info("Updating dependent scenarios {0} of {1}".format(future_scenarios, scenario))
        for future_scenario in future_scenarios:
            agriculture_feature_class = future_scenario.db_entity_feature_class(DbEntityKey.FUTURE_AGRICULTURE)
            # Leave user-edited features (updater set) untouched.
            future_features = agriculture_feature_class.objects.filter(
                id__in=base_features,
                updater__isnull=True
            )
            logger.info("Updating {0} features of {1}".format(future_features.count(), future_scenario))
            updated_built_forms = []
            for feature in future_features.iterator():
                base_feature = base_features.get(id=feature.id)
                # Track built-form changes so their tiles can be invalidated below.
                if base_feature.built_form_id != feature.built_form_id:
                    updated_built_forms.append(feature)
                base_attributes = dict(
                    gross_net_pct=base_feature.gross_net_pct,
                    built_form_key=base_feature.built_form_key,
                    built_form_id=base_feature.built_form_id,
                    density_pct=base_feature.density_pct,
                    acres_gross=base_feature.acres_gross,
                    crop_yield=base_feature.crop_yield,
                    market_value=base_feature.market_value,
                    production_cost=base_feature.production_cost,
                    water_consumption=base_feature.water_consumption,
                    labor_force=base_feature.labor_force,
                    truck_trips=base_feature.truck_trips,
                )
                # Python 2 dict iteration; copies each analysis field across.
                for attr, value in base_attributes.iteritems():
                    setattr(feature, attr, value)
                feature.save(update_fields=self.ANALYSIS_FIELDS)
            layer = Layer.objects.filter(presentation__config_entity=agriculture_feature_class.config_entity,
                                         db_entity_interest__db_entity__key=agriculture_feature_class.db_entity_key)[0]
            if updated_built_forms:
                for key in layer.keys:
                    # clear tilestache cache for updated dependencies
                    invalidate_feature_cache(key, updated_built_forms)
def update(self, **kwargs):
scenario = self.config_entity.subclassed
logger.debug('{0}:Starting Agriculture Core Analysis for {1}'.format(timestamp(), self.config_entity))
if isinstance(scenario, BaseScenario):
agriculture_db_entity_key = DbEntityKey.BASE_AGRICULTURE_CANVAS
elif isinstance(scenario, FutureScenario):
agriculture_db_entity_key = DbEntityKey.FUTURE_AGRICULTURE_CANVAS
else:
raise Exception("Config Entity is not a Future or Base Scenario, cannot run AgricultureCore.")
ids = kwargs.get('ids', None)
agriculture_feature_class = self.config_entity.db_entity_feature_class(agriculture_db_entity_key)
if ids:
features = agriculture_feature_class.objects.filter(id__in=ids)
else:
features = agriculture_feature_class.objects.filter(built_form__isnull=False)
feature_count = features.count()
if not feature_count:
logger.info("No features to process!")
return
logger.debug("Processing {0} features...".format(feature_count))
iterator_start = datetime.datetime.utcnow().replace(tzinfo=utc)
self.progress(0.05, **kwargs)
if feature_count <= 36:
increment_portion = (.9 / feature_count) + .001
equal_portion = 1
else:
increment_portion = .05
equal_portion = int((feature_count - 1) / 18)
i = 1
for feature in features.iterator():
if i % equal_portion == 0:
self.progress(increment_portion, **kwargs)
if not feature.built_form:
feature.built_form_key = None
feature.crop_yield = 0
feature.market_value = 0
feature.production_cost = 0
feature.water_consumption = 0
|
feature.labor_force = 0
feature.truck_trips = 0
else:
applied_acres = feature.acres_gross * feature.density_pct * feature.dev_pct
agriculture_attribute_set = feature.built_form.resolve_built_form(feature.built_form).agriculture_attribute_set
f
|
eature.built_form_key = feature.built_form.key
feature.crop_yield = agriculture_attribute_set.crop_yield * applied_acres
feature.market_value = agriculture_attribute_set.unit_price * feature.crop_yield
feature.production_cost = agriculture_attribute_set.cost * applied_acres
feature.water_consumption = agriculture_attribute_set.water_consumption * applied_acres
feature.labor_force = agriculture_attribute_set.labor_input * applied_acres
feature.truck_trips = agriculture_attribute_set.truck_trips * applied_acres
feature.save(update_fields=self.ANALYSIS_FIELDS)
i += 1
total_time = datetime.datetime.utcnow().replace(tzinfo=utc) - iterator_start
logger.debug("Processed {0} features in {1}: {2} per feature".format(
feature_count, total_time, total_time/feature_count
))
self.progress(.9, **kwargs)
logger.debug('{0}:Finished Agriculture Core Analysis for {1} '.format(timestamp(), s
|
dhoffman34/django
|
django/contrib/auth/models.py
|
Python
|
bsd-3-clause
| 17,902
| 0.001061
|
from __future__ import unicode_literals
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string, salted_hmac
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
    """
    Signal receiver that stamps the logging-in user's last_login with the
    current time, persisting only that single column.
    """
    login_time = timezone.now()
    user.last_login = login_time
    user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
    """Manager providing natural-key lookup for Permission objects."""

    def get_by_natural_key(self, codename, app_label, model):
        # A permission's natural key is (codename, app_label, model); the
        # last two components identify the ContentType row.
        content_type = ContentType.objects.get_by_natural_key(app_label, model)
        return self.get(codename=codename, content_type=content_type)
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.
    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:
        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.
    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."
    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    name = models.CharField(_('name'), max_length=255)
    # Each permission is scoped to exactly one model via its ContentType.
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()
    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename may repeat across models but not within one.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')
    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))
    def natural_key(self):
        # Serialization key that is stable across databases, unlike the pk.
        return (self.codename,) + self.content_type.natural_key()
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """Manager providing natural-key (name-based) lookup for Group."""

    def get_by_natural_key(self, name):
        # A group's natural key is simply its unique name.
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.
    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.
    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)
    objects = GroupManager()
    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
    def __str__(self):
        return self.name
    def natural_key(self):
        # Groups serialize by their unique name rather than by pk.
        return (self.name,)
class BaseUserManager(models.Manager):
    """Base manager shared by user models; it only assumes the concrete
    model declares a USERNAME_FIELD."""

    @classmethod
    def normalize_email(cls, email):
        """Lowercase the domain half of *email*; the local part (before the
        final '@') is left untouched. Inputs without '@' pass through
        unchanged; None becomes ''."""
        email = email or ''
        local_part, sep, domain = email.strip().rpartition('@')
        if not sep:
            # No '@' present: return the input as-is, matching the old
            # ValueError fallback.
            return email
        return '%s@%s' % (local_part, domain.lower())

    def make_random_password(self, length=10,
                             allowed_chars='abcdefghjkmnpqrstuvwxyz'
                                           'ABCDEFGHJKLMNPQRSTUVWXYZ'
                                           '23456789'):
        """Generate a random password of *length* drawn from *allowed_chars*.

        The default alphabet deliberately omits characters that look alike
        (I, O, l, 0, 1) to avoid transcription confusion.
        """
        return get_random_string(length, allowed_chars)

    def get_by_natural_key(self, username):
        # USERNAME_FIELD names whichever field the model treats as its
        # unique identifier.
        return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        # New accounts are active immediately; last_login is seeded with the
        # creation time rather than left null.
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        user.set_password(password)  # hashes; the raw password is never stored
        user.save(using=self._db)
        return user
    def create_user(self, username, email=None, password=None, **extra_fields):
        # Convenience wrapper: a regular account (not staff, not superuser).
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)
def create_superuser(self, username, email, passw
|
ord, **extra_fields):
return self._create_user(username, email, password, True, True,
**extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the ident
|
ifying username for this User"
return getattr(self, self.USERNAME_FIELD)
    def __str__(self):
        # Display users by their identifying username field.
        return self.get_username()
    def natural_key(self):
        # Serialization key: the USERNAME_FIELD value, unique per user.
        return (self.get_username(),)
    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        # NOTE(review): the anonymous-user counterpart presumably returns
        # True -- it is not visible in this file.
        return False
    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        # Constant by design: any real (non-anonymous) user counts as
        # authenticated.
        return True
    def set_password(self, raw_password):
        # Stores only the salted hash of raw_password, never the plain text.
        self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
|
qsheeeeen/Self-Driving-Car
|
rl_toolbox/policy/shared.py
|
Python
|
mit
| 8,677
| 0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from ..net import VAE
from ..util.common import batch_to_sequence, sequence_to_batch
from ..util.init import orthogonal_init
class _CNNBase(nn.Module):
def __init__(self):
super(_CNNBase, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(16, 32, kernel_size=4, stride=2)
self.fc = nn.Linear(3200, 256)
def forward(self, x):
h1 = F.relu(self.conv1(x))
h2 = F.relu(self.conv2(h1))
h2 = h2.view(h2.size(0), -1)
return F.relu(self.fc(h2))
class _RNNBase(nn.Module):
def __init__(self, input_size, output_size):
super(_RNNBase, self).__init__()
self.rnn = nn.LSTM(input_size, output_size, batch_first=True)
# self.rnn = nn.GRU(input_size, output_size, batch_first=True)
self.hidden = None
def forward(self, x):
if self.hidden is None:
out, self.hidden = self.rnn(x)
else:
c, h = self.hidden
self.hidden = c.detach(), h.detach()
out, self.hidden = self.rnn(x, self.hidden)
return out
class VAEPolicy(nn.Module):
    """Gaussian policy whose observations are encoded by a VAE.

    The encoder output is produced under no_grad and detached, so only the
    action mean / log-std / value heads receive gradients.
    """

    def __init__(self, input_shape, output_shape):
        super(VAEPolicy, self).__init__()
        self.pd = None  # Normal distribution from the latest forward pass
        z_size = 256
        self.visual = VAE(z_size, add_noise=False)
        self.mean_head = nn.Linear(z_size, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(z_size, 1)

    def forward(self, x):
        with torch.no_grad():
            latent, _, _ = self.visual.encode(x)
        latent = latent.detach()
        mean = self.mean_head(latent)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        # Stochastic in training mode, deterministic (mean) in eval mode.
        if self.training:
            action = self.pd.sample()
        else:
            action = mean
        value = self.value_head(latent)
        return action, value

    @property
    def recurrent(self):
        return False

    @property
    def name(self):
        return 'VAEPolicy'
class VAELSTMPolicy(nn.Module):
    """Recurrent Gaussian policy: frozen VAE encoder -> LSTM -> heads.

    The VAE parameters are frozen (requires_grad=False); only the LSTM and
    the action/value heads are trained. forward(x) returns (action, value)
    and stores the Normal distribution on self.pd for loss computation.
    """

    def __init__(self, input_shape, output_shape):
        super(VAELSTMPolicy, self).__init__()
        self.pd = None  # Normal distribution from the latest forward pass
        z_size = 128
        self.visual = VAE(z_size, add_noise=False)
        self.rnn = _RNNBase(z_size, z_size)
        self.mean_head = nn.Linear(z_size, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(z_size, 1)
        # Freeze the visual encoder; it is not trained with the policy.
        for param in self.visual.parameters():
            param.requires_grad = False
        self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))
        self.mean_head.apply(orthogonal_init([nn.Linear], 'tanh'))

    def forward(self, x):
        # BUG FIX: was `with torch.no_grad:` (missing call), which raises at
        # runtime -- the context manager must be instantiated.
        with torch.no_grad():
            feature, _, _ = self.visual.encode(x)
        # Reshape the flat batch into sequences of num_steps for the LSTM.
        feature = batch_to_sequence(feature, self.num_steps)
        memory = sequence_to_batch(self.rnn(feature))
        mean = self.mean_head(memory)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        action = self.pd.sample() if self.training else mean
        value = self.value_head(memory)
        return action, value

    @property
    def num_steps(self):
        # Sequence length the rollout is reshaped into for the LSTM.
        return 8

    @property
    def recurrent(self):
        return True

    @property
    def name(self):
        return 'VAELSTMPolicy'
class CNNPolicy(nn.Module):
    """Feed-forward Gaussian policy over image observations.

    forward(x) returns (action, value) and stores the Normal distribution
    on self.pd for loss computation.
    """

    def __init__(self, input_shape, output_shape):
        super(CNNPolicy, self).__init__()
        self.pd = None  # Normal distribution from the latest forward pass
        self.cnn = _CNNBase()
        feat_size = self.cnn.fc.out_features
        self.mean_head = nn.Linear(feat_size, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(feat_size, 1)

    def forward(self, x):
        feature = self.cnn(x)
        mean = self.mean_head(feature)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        # Stochastic in training mode, deterministic (mean) in eval mode.
        if self.training:
            action = self.pd.sample()
        else:
            action = mean
        value = self.value_head(feature)
        return action, value

    @property
    def recurrent(self):
        return False

    @property
    def name(self):
        return 'CNNPolicy'
class CNNLSTMPolicy(nn.Module):
    def __init__(self, input_shape, output_shape):
        """CNN feature extractor followed by an LSTM and Gaussian policy heads."""
        super(CNNLSTMPolicy, self).__init__()
        self.pd = None  # Normal distribution produced by the last forward pass
        self.cnn = _CNNBase()
        size = self.cnn.fc.out_features
        self.rnn = _RNNBase(size, size)
        self.mean_head = nn.Linear(size, output_shape[0])
        # NOTE(review): initialized with ones, unlike the zeros used by the
        # sibling policies (initial std is e rather than 1) -- confirm intended.
        self.log_std_head = nn.Parameter(torch.ones(output_shape[0]))
        self.value_head = nn.Linear(size, 1)
        # self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))
        # self.mean_head.apply(orthogonal_init([nn.Linear], 'tanh'))
def forward(self, x):
feature = self.cnn(x)
feature = batch_to_sequence(feature, self.num_steps)
memory = sequence_to_batch(self.rnn(feature))
|
mean = self.mean_head(memory)
std = self.log_std_head.expand_as(mean).exp()
self.pd = Normal(mean, std)
action = self.pd.sample() if self.training else mean
value = self.value_head(memory)
return
|
action, value
@property
def num_steps(self):
return 8
@property
def recurrent(self):
return True
@property
def name(self):
return 'CNNLSTMPolicy'
class MLPPolicy(nn.Module):
    """Two-layer tanh MLP Gaussian policy with a separate value network.

    forward(x) returns (action, value); the action is sampled from
    Normal(mean, std) in training mode and is the mean in eval mode. The
    Normal distribution is kept on self.pd for loss computation.
    """

    def __init__(self, input_shape, output_shape):
        super(MLPPolicy, self).__init__()
        self.pd = None  # Normal distribution produced by the last forward pass
        self.pi_fc1 = nn.Linear(input_shape[0], 64)
        self.pi_fc2 = nn.Linear(64, 64)
        self.vf_fc1 = nn.Linear(input_shape[0], 64)
        self.vf_fc2 = nn.Linear(64, 64)
        self.mean_head = nn.Linear(64, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(64, 1)
        self.apply(orthogonal_init([nn.Linear], 'tanh'))
        self.mean_head.apply(orthogonal_init([nn.Linear], 'linear'))
        self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))

    def forward(self, x):
        # FIX: F.tanh has been deprecated since PyTorch 0.4; torch.tanh is
        # the supported, numerically identical replacement.
        pi_h1 = torch.tanh(self.pi_fc1(x))
        pi_h2 = torch.tanh(self.pi_fc2(pi_h1))
        mean = self.mean_head(pi_h2)
        std = self.log_std_head.expand_as(mean).exp()
        self.pd = Normal(mean, std)
        action = self.pd.sample() if self.training else mean
        vf_h1 = torch.tanh(self.vf_fc1(x))
        vf_h2 = torch.tanh(self.vf_fc2(vf_h1))
        value = self.value_head(vf_h2)
        return action, value

    @property
    def recurrent(self):
        return False

    @property
    def name(self):
        return 'MLPPolicy'
class MLPLSTMPolicy(nn.Module):
    def __init__(self, input_shape, output_shape):
        """Separate recurrent policy and value towers, each fc -> LSTM."""
        super(MLPLSTMPolicy, self).__init__()
        self.pd = None  # Normal distribution produced by the last forward pass
        self.pi_fc = nn.Linear(input_shape[0], 64)
        self.pi_rnn = _RNNBase(64, 64)
        self.vf_fc = nn.Linear(input_shape[0], 64)
        self.vf_rnn = _RNNBase(64, 64)
        self.mean_head = nn.Linear(64, output_shape[0])
        self.log_std_head = nn.Parameter(torch.zeros(output_shape[0]))
        self.value_head = nn.Linear(64, 1)
        self.pi_fc.apply(orthogonal_init([nn.Linear], 'tanh'))
        self.vf_fc.apply(orthogonal_init([nn.Linear], 'tanh'))
        self.value_head.apply(orthogonal_init([nn.Linear], 'linear'))
def forward(self, x):
pi_h1 = F.tanh(self.pi_fc(x))
pi_h1 = batch_to_sequence(pi_h1, self.num_steps)
pi_h2 = sequence_to_batch(self.pi_rnn(pi_h1))
pi_h2 = F.tanh(pi_h2)
mean = self.mean_head(pi_h2)
std = self.log_std_head.expand_as(mean).exp()
vf_h1 = F.tanh(self.vf_fc
|
CN-UPB/OpenBarista
|
utils/decaf-utils-rpc/tests/unittests/InlineCallbacks.py
|
Python
|
mpl-2.0
| 468
| 0.002137
|
__author__ = 'thgoette'
from BasicTest import BasicTest, wait
import un
|
ittest
class InlineCallbacks(BasicTest):
@wait
def test_inlinecallbacks(self):
print "Starting inlineCallback Test "
def callback(result):
print "Callback: " + str(result)
self.assertEqual(result, 50)
d = sel
|
f.caller.call("plus5times10", 0)
d.addBoth(callback)
return d
if __name__ == '__main__':
unittest.main()
|
PapenfussLab/Srtools
|
bin/fastq_split.py
|
Python
|
artistic-2.0
| 1,113
| 0.006289
|
#!/usr/bin/env python
"""
fastq_split.py [-n|--num_files N_FILES] <input filename> <output directory>
"""
import os
import sys
import math
from srt.fastq import *
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-n", "--num_files", dest="num_files",
help="Number of output files", type="int", default=5)
(options, args) = parser.parse_args()
input_filename = args[0]
output_directory = args[1]
if options.n
|
um_files<=0:
print "Number of files must be > 0"
sys.exit(-1)
num_places = 1+int(math.log10(options.num_files))
# Define output filename format
_ = os.path.split(input_filename)[-1]
base = os.path.splitext(_)[0]
ext = os.path.splitext(_)[1]
format = os.path.join(output_directory, "%s_%%0.%ii%s") % (base, num_places, ext)
# Open files
o
|
utput_file = []
for i in xrange(options.num_files):
output_filename = format % (i+1)
output_file.append(FastqFile(output_filename, "w"))
# Split reads
i = 0
for h,s,q in FastqFile(input_filename):
output_file[i].write(h,s,q)
i = (i+1) % options.num_files
# Close files
for _ in output_file:
_.close()
|
AlexCatarino/pythonnet
|
tests/test_field.py
|
Python
|
mit
| 8,719
| 0
|
# -*- coding: utf-8 -*-
"""Test CLR field support."""
import System
import pytest
from Python.Test import FieldTest
def test_public_instance_field():
    """Test public instance fields."""
    ob = FieldTest()
    assert ob.PublicField == 0  # CLR default value for an int field
    ob.PublicField = 1
    assert ob.PublicField == 1
    # CLR fields cannot be deleted from Python.
    with pytest.raises(TypeError):
        del FieldTest().PublicField
def test_public_static_field():
"""Test public static fields."""
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
FieldTest.PublicStaticField = 1
assert FieldTest.PublicStaticField == 1
assert ob.PublicStaticField == 1
ob.PublicStaticField = 0
assert ob.PublicStaticField == 0
with pytest.raises(TypeError):
del FieldTest.PublicStaticField
with pytest.raises(TypeError):
del FieldTest().PublicStaticField
def test_protected_instance_field():
"""Test protected instance fields."""
ob = FieldTest()
assert ob.ProtectedField == 0
ob.ProtectedField = 1
assert ob.ProtectedField == 1
with pytest.raises(TypeError):
del FieldTest().ProtectedField
def test_protected_static_field():
"""Test protected static fields."""
ob = FieldTest()
assert FieldTest.ProtectedStaticField == 0
FieldTest.ProtectedStaticField = 1
assert FieldTest.ProtectedStaticField == 1
assert ob.ProtectedStaticField == 1
ob.ProtectedStaticField = 0
assert ob.ProtectedStaticField == 0
with pytest.raises(TypeError):
del FieldTest.ProtectedStaticField
with pytest.raises(TypeError):
del FieldTest().ProtectedStaticField
def test_read_only_instance_field():
"""Test readonly instance fields."""
assert FieldTest().ReadOnlyField == 0
with pytest.raises(TypeError):
FieldTest().ReadOnlyField = 1
with pytest.raises(TypeError):
del FieldTest().ReadOnlyField
def test_read_only_static_field():
"""Test readonly static fields."""
ob = FieldTest()
assert FieldTest.ReadOnlyStaticField == 0
assert ob.ReadOnlyStaticField == 0
with pytest.raises(TypeError):
FieldTest.ReadOnlyStaticField = 1
with pytest.raises(TypeError):
FieldTest().ReadOnlyStaticField = 1
with pytest.raises(TypeError):
del FieldTest.ReadOnlyStaticField
with pytest.raises(TypeError):
del FieldTest().ReadOnlyStaticField
def test_constant_field():
"""Test const fields."""
ob = FieldTest()
assert FieldTest.ConstField == 0
assert ob.ConstField == 0
with pytest.raises(TypeError):
FieldTest().ConstField = 1
with pytest.raises(TypeError):
FieldTest.ConstField = 1
with pytest.raises(TypeError):
del FieldTest().ConstField
with pytest.raises(TypeError):
del FieldTest.ConstField
def test_internal_field():
"""Test internal fields."""
with pytest.raises(AttributeError):
_ = FieldTest().InternalField
with pytest.raises(AttributeError):
_ = FieldTest().InternalStaticField
with pytest.raises(AttributeError):
_ =
|
FieldTest.InternalStaticField
def test_private_field():
"""Test private fields."""
with pytest.raises(AttributeError):
_ = FieldTest().PrivateFie
|
ld
with pytest.raises(AttributeError):
_ = FieldTest().PrivateStaticField
with pytest.raises(AttributeError):
_ = FieldTest.PrivateStaticField
def test_field_descriptor_get_set():
"""Test field descriptor get / set."""
# This test ensures that setting an attribute implemented with
# a descriptor actually goes through the descriptor (rather than
# silently replacing the descriptor in the instance or type dict.
ob = FieldTest()
assert FieldTest.PublicStaticField == 0
assert ob.PublicStaticField == 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
ob.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
FieldTest.PublicStaticField = 0
descriptor = FieldTest.__dict__['PublicStaticField']
assert type(descriptor) != int
def test_field_descriptor_wrong_type():
"""Test setting a field using a value of the wrong type."""
with pytest.raises(ValueError):
FieldTest().PublicField = "spam"
def test_field_descriptor_abuse():
"""Test field descriptor abuse."""
desc = FieldTest.__dict__['PublicField']
with pytest.raises(TypeError):
desc.__get__(0, 0)
with pytest.raises(TypeError):
desc.__set__(0, 0)
def test_boolean_field():
    """Test boolean fields."""
    # change this to true / false later for Python 2.3?
    ob = FieldTest()
    assert ob.BooleanField is False
    ob.BooleanField = True
    assert ob.BooleanField is True
    ob.BooleanField = False
    assert ob.BooleanField is False
    # Integer assignments round-trip as CLR booleans.
    ob.BooleanField = 1
    assert ob.BooleanField is True
    ob.BooleanField = 0
    assert ob.BooleanField is False
def test_sbyte_field():
"""Test sbyte fields."""
ob = FieldTest()
assert ob.SByteField == 0
ob.SByteField = 1
assert ob.SByteField == 1
def test_byte_field():
"""Test byte fields."""
ob = FieldTest()
assert ob.ByteField == 0
ob.ByteField = 1
assert ob.ByteField == 1
def test_char_field():
"""Test char fields."""
ob = FieldTest()
assert ob.CharField == u'A'
assert ob.CharField == 'A'
ob.CharField = 'B'
assert ob.CharField == u'B'
assert ob.CharField == 'B'
ob.CharField = u'C'
assert ob.CharField == u'C'
assert ob.CharField == 'C'
def test_int16_field():
"""Test int16 fields."""
ob = FieldTest()
assert ob.Int16Field == 0
ob.Int16Field = 1
assert ob.Int16Field == 1
def test_int32_field():
"""Test int32 fields."""
ob = FieldTest()
assert ob.Int32Field == 0
ob.Int32Field = 1
assert ob.Int32Field == 1
def test_int64_field():
"""Test int64 fields."""
ob = FieldTest()
assert ob.Int64Field == 0
ob.Int64Field = 1
assert ob.Int64Field == 1
def test_uint16_field():
"""Test uint16 fields."""
ob = FieldTest()
assert ob.UInt16Field == 0
ob.UInt16Field = 1
assert ob.UInt16Field == 1
def test_uint32_field():
"""Test uint32 fields."""
ob = FieldTest()
assert ob.UInt32Field == 0
ob.UInt32Field = 1
assert ob.UInt32Field == 1
def test_uint64_field():
"""Test uint64 fields."""
ob = FieldTest()
assert ob.UInt64Field == 0
ob.UInt64Field = 1
assert ob.UInt64Field == 1
def test_single_field():
"""Test single fields."""
ob = FieldTest()
assert ob.SingleField == 0.0
ob.SingleField = 1.1
assert ob.SingleField == 1.1
def test_double_field():
"""Test double fields."""
ob = FieldTest()
assert ob.DoubleField == 0.0
ob.DoubleField = 1.1
assert ob.DoubleField == 1.1
def test_decimal_field():
"""Test decimal fields."""
ob = FieldTest()
assert ob.DecimalField == System.Decimal(0)
ob.DecimalField = System.Decimal(1)
assert ob.DecimalField == System.Decimal(1)
def test_string_field():
"""Test string fields."""
ob = FieldTest()
assert ob.StringField == "spam"
ob.StringField = "eggs"
assert ob.StringField == "eggs"
def test_interface_field():
"""Test interface fields."""
from Python.Test import Spam, ISpam
ob = FieldTest()
assert ISpam(ob.SpamField).GetValue() == "spam"
assert ob.SpamField.GetValue() == "spam"
ob.SpamField = Spam("eggs")
assert ISpam(ob.SpamField).GetValue() == "eggs"
assert ob.SpamField.GetValue() == "eggs"
def test_object_field():
"""Test ob fields."""
ob = FieldTest()
assert ob.ObjectField is None
ob.ObjectField = System.String("spam")
assert ob.ObjectField == "spam"
ob.ObjectField = System.Int32(1)
assert ob.ObjectField == 1
ob.ObjectField = None
assert ob.ObjectField is None
def test_enum_field():
"""Test enum fields."""
from Python.Test import ShortEnum
ob = FieldTest()
assert ob.EnumField == ShortEnum.Zero
|
libvirt/autotest
|
scheduler/email_manager.py
|
Python
|
gpl-2.0
| 3,273
| 0.002139
|
import traceback, socket, os, time, smtplib, re, sys, getpass, logging
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.client.common_lib import global_config
CONFIG_SECTION = 'SCHEDULER'
CONFIG_SECTION_SMTP = 'SERVER'
class EmailNotificationManager(object):
    def __init__(self):
        """Read sender/recipient and SMTP settings from global_config."""
        self._emails = []  # queued bodies awaiting send_queued_emails()
        self._from_address = global_config.global_config.get_config_value(
            CONFIG_SECTION, "notify_email_from", default=getpass.getuser())
        # Empty notify address disables notification emails entirely.
        self._notify_address = global_config.global_config.get_config_value(
            CONFIG_SECTION, "notify_email", default='')
        self._smtp_server = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_server", default='localhost')
        self._smtp_port = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_port", default=None)
        self._smtp_user = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_user", default='')
        self._smtp_password = global_config.global_config.get_config_value(
            CONFIG_SECTION_SMTP, "smtp_password", default='')
def send_email(self, to_string, subject, body):
"""Mails out emails to the addresses listed in to_string.
to_string is split into a list which can be delimited by any of:
';', ',', ':' or any whitespace
"""
# Create list from string removing empty strings from the list.
to_list = [x for x in re.split('\s|,|;|:', to_string) if x]
if not to_list:
return
msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
self._from_address, ', '.join(to_list), subject, body)
try:
mailer = smtplib.SMTP(self._smtp_server, self._smtp_port)
try:
if self._smtp_user:
mailer.login(self._smtp_user, self._smtp_password)
mailer.sendmail(self._from_address, to_list, msg)
finally:
try:
mailer.quit()
except:
logging.exception('mailer.quit() failed:')
except Exception:
logging.exception('Sending email failed:')
def enqueue_notify_email(self, subject, message):
logging.error(subject + '\n' + message)
if not self._notify_address:
return
body = 'Subject: ' + subject + '\n'
body += "%s / %s / %s\n%s" % (socket.gethostname(),
os.getpid(),
time.strftime("%X %x"), message)
self._emails.append(body)
def send_queued_emails(self):
if not self._emails:
return
subject = 'Scheduler notifications from ' + socket.gethostname()
|
separator = '\n' + '-' * 40 + '\n'
body = separator.join(self._emails)
self.send_email(self._notify_address, subject, body)
self._emails = []
def log_stacktrace(self, reason):
logging.exception(reason)
message = "EXCEPTION: %s\n%s" % (reason, traceback.format_exc())
self.enqueue_not
|
ify_email("monitor_db exception", message)
manager = EmailNotificationManager()
|
leonardoRC/PyNFe
|
pynfe/utils/flags.py
|
Python
|
lgpl-3.0
| 11,560
| 0.004088
|
# -*- coding: utf-8 -*-
# XML namespaces used throughout NF-e document generation and signing.
NAMESPACE_NFE = 'http://www.portalfiscal.inf.br/nfe'
NAMESPACE_SIG = 'http://www.w3.org/2000/09/xmldsig#'
NAMESPACE_SOAP = 'http://www.w3.org/2003/05/soap-envelope'
NAMESPACE_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
NAMESPACE_XSD = 'http://www.w3.org/2001/XMLSchema'
NAMESPACE_METODO = 'http://www.portalfiscal.inf.br/nfe/wsdl/'
# Default NF-e layout version and QR-code specification version.
VERSAO_PADRAO = '3.10'
VERSAO_QRCODE = '100'
# Accepted taxpayer document kinds (company vs. individual).
TIPOS_DOCUMENTO = (
    'CNPJ',
    'CPF',
)
ICMS_TIPOS_TRIBUTACAO = (
('00', 'ICMS 00 - Tributada integralmente'),
('10', 'ICMS 10 - Tributada com cobranca do ICMS por substituicao tributaria'),
('20', 'ICMS 20 - Com reducao da base de calculo'),
('30', 'ICMS 30 - Isenta ou nao tributada e com cobranca do ICMS por substituicao tributaria'),
('40', 'ICMS 40 - Isenta'),
('41', 'ICMS 41 - Nao tributada'),
('50', 'ICMS 50 - Suspensao'),
('51', 'ICMS 51 - Diferimento'),
('60', 'ICMS 60 - Cobrado anteriormente por substituicao tributaria'),
('70', 'ICMS 70 - Com reducao da base de calculo e cobranca do ICMS por substituicao tributaria'),
('90', 'ICMS 90 - Outras'),
('101', 'ICMS 101 - Tributação ICMS pelo Simples Nacional, CSOSN=101'),
('102', 'ICMS 102 - Tributação ICMS pelo Simples Nacional, CSOSN=102, 103, 300 ou 400'),
('201', 'ICMS 201 - Tributação ICMS pelo Simples Nacional, CSOSN=201'),
('202', 'ICMS 202 - Tributação ICMS pelo Simples Nacional, CSOSN=202 ou 203'),
('500', 'ICMS 500 - Tributação ICMS pelo Simples Nacional, CSOSN=500'),
('900', 'ICMS 900 - Tributação ICMS pelo Simples Nacional, CSOSN=900'),
('ST', 'ICMS ST - Grupo de informação do ICMS ST devido para a UF de destino, nas operações interestaduais de produtos que tiveram retenção antecipada de ICMS por ST na UF do remetente. Repasse via Substituto Tributário.')
)
ICMS_ORIGENS = (
(0, 'Nacional'),
(1, 'Estrangeira - Importacao Direta'),
(2, 'Estrangeira - Adquirida no Mercado Interno'),
)
ICMS_MODALIDADES = (
(0, 'Margem Valor Agregado'),
(1, 'Pauta (valor)'),
(2, 'Preco Tabelado Max. (valor)'),
(3, 'Valor da Operacao'),
)
NF_STATUS = (
'Em Digitacao',
'Validada',
'Assinada',
'Em processamento',
'Autorizada',
'Rejeitada',
'Cancelada',
)
NF_TIPOS_DOCUMENTO = (
(0, 'Entrada'),
(1, 'Saida'),
)
NF_PROCESSOS_EMISSAO = (
(0, u'Emissão de NF-e com aplicativo do contribuinte'),
(1, u'Emissão de NF-e avulsa pelo Fisco'),
(2, u'Emissão de NF-e avulsa, pelo contribuinte com seu certificado digital, através do site do Fisco'),
(3, u'Emissão NF-e pelo contribuinte com aplicativo fornecido pelo Fisco'),
)
NF_TIPOS_IMPRESSAO_DANFE = (
(1, 'Retrato'),
(2, 'Paisagem'),
)
NF_FORMAS_PAGAMENTO = (
(0, 'Pagamento a vista'),
(1, 'Pagamento a prazo'),
(2, 'Outros'),
)
NF_FORMAS_EMISSAO = (
(1, 'Normal'),
(2, 'Contingencia'),
(3, 'Contingencia com SCAN'),
(4, 'Contingencia via DPEC'),
(5, 'Contingencia FS-DA'),
)
NF_FINALIDADES_EMISSAO = (
(1, 'NF-e normal'),
(2, 'NF-e complementar'),
(3, 'NF-e de ajuste'),
)
NF_REFERENCIADA_TIPOS = (
'Nota Fiscal eletronica',
'Nota Fiscal',
)
NF_PRODUTOS_ESPECIFICOS = (
'Veiculo',
'Medicamento',
'Armamento',
'Combustivel',
)
NF_AMBIENTES = (
(1, 'Producao'),
(2, 'Homologacao'),
)
IPI_TIPOS_TRIBUTACAO = (
('00', 'IPI 00 - Entrada com recuperacao de credito'),
('01', 'IPI 01 - Entrada tributada com aliquota zero'),
('02', 'IPI 02 - Entrada isenta'),
('03', 'IPI 03 - Entrada nao-tributada'),
('04', 'IPI 04 - Entrada imune'),
('05', 'IPI 05 - Entrada com suspensao'),
('49', 'IPI 49 - Outras entradas'),
('50', 'IPI 50 - Saida tributada'),
('51', 'IPI 51 - Saida tributada com aliquota zero'),
('52', 'IPI 52 - Saida isenta'),
('53', 'IPI 53 - Saida nao-tributada'),
('54', 'IPI 54 - Saida imune'),
('55', 'IPI 55 - Saida com suspensao'),
('99', 'IPI 99 - Outas saidas'),
)
IPI_TIPOS_CALCULO = (
'Percentual',
'Em Valor',
)
PIS_TIPOS_TRIBUTACAO = (
('01', 'PIS 01 - Operação Tributável - Base de cálculo = valor da operação alíquota normal (cumulativo/não cumulativo)'),
('02', 'PIS 02 - Operação Tributável - Base de cálculo = valor da operação (alíquota diferenciada)'),
('03', 'PIS 03 - Operacao Tributavel - Base de cálculo = quantidade vendida x alíquota por unidade de produto)'),
('04', 'PIS 04 - Operacao Tributavel - Tributacao Monofasica - (Aliquota Zero)'),
('06', 'PIS 06 - Operacao Tributavel - Aliquota Zero'),
('07', 'PIS 07 - Operacao Isenta da Contribuicao'),
('08', 'PIS 08 - Operacao sem Indidencia da Contribuicao'),
('09', 'PIS 09 - Operacao com Suspensao da Contribuicao'),
('49', 'PIS 49 - Outras Operações de Saída'),
('50', 'PIS 50 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('51', 'PIS 51 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Não Tributada no Mercado Interno'),
('52', 'PIS 52 - Operação com Direito a Crédito – Vinculada Exclusivamente a Receita de Exportação'),
('53', 'PIS 53 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno'),
('54', 'PIS 54 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas no Mercado Interno e de Exportação'),
('55', 'PIS 55 - Operação com Direito a Crédito - Vinculada a Receitas Não Tributadas no Mercado Interno e de Exportação'),
('56', 'PIS 56 - Operação com Direito a Crédito - Vinculada a Receitas Tributadas e Não Tributadas no Mercado Interno, e de Exportação'),
('60', 'PIS 60 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('61', 'PIS 61 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita Não Tributada no Mercado Interno'),
('62', 'PIS 62 - Crédito Presumido - Operação de Aquisição Vinculada Exclusivamente a Receita de Exportação'),
('63', 'PIS 63 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno'),
('64', 'PIS 64 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas no Mercado Interno e de Exportação'),
('65', 'PIS 65 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Não Tributadas no Mercado Interno e de Exportação'),
('66', 'PIS 66 - Crédito Presumido - Operação de Aquisição Vinculada a Receitas Tributadas e Não-Tributadas no Mercado Interno, e de Exportação'),
('67', 'PIS 67 - Crédito Presumido - Outras Operações'),
('70', 'PIS 70 - Operação de Aquisição sem Direito a Crédit
|
o'),
('71', 'PIS 71 - Operação de Aquisição com Isenção'),
('72', 'PIS 72 - Operação de Aquisição com Suspensão'),
('73', 'PIS 73 - Operação de Aquisição a Alíquota Zero'),
('74', 'PIS 74 - Operação de Aquisição; sem Incidência da Co
|
ntribuição'),
('75', 'PIS 75 - Operação de Aquisição por Substituição Tributária'),
('98', 'PIS 98 - Outras Operações de Entrada'),
('99', 'PIS 99 - Outras operacoes'),
)
PIS_TIPOS_CALCULO = IPI_TIPOS_CALCULO
COFINS_TIPOS_TRIBUTACAO = (
('01', 'COFINS 01 - Operação Tributável - Base de cálculo = valor da operação alíquota normal (cumulativo/não cumulativo)'),
('02', 'COFINS 02 - Operação Tributável - Base de cálculo = valor da operação (alíquota diferenciada)'),
('03', 'COFINS 03 - Operacao Tributavel - Base de cálculo = quantidade vendida x alíquota por unidade de produto)'),
('04', 'COFINS 04 - Operacao Tributavel - Tributacao Monofasica - (Aliquota Zero)'),
('06', 'COFINS 06 - Operacao Tributavel - Aliquota Zero'),
('07', 'COFINS 07 - Operacao Isenta da Contribuicao'),
('08', 'COFINS 08 - Operacao sem Indidencia da Contribuicao'),
('09', 'COFINS 09 - Operacao com Suspensao da Contribuicao'),
('49', 'COFINS 49 - Outras Operações de Saída'),
('50', 'COFINS 50 - Operação com Direito a Crédito - Vinculada Exclusivamente a Receita Tributada no Mercado Interno'),
('51', 'COFINS 51 - Operação com Dir
|
Spoken-tutorial/spoken-website
|
creation/templatetags/creationdata.py
|
Python
|
gpl-3.0
| 11,230
| 0.011843
|
# Standard Library
from builtins import str
import os
import zipfile
from urllib.parse import quote_plus
from urllib.request import urlopen
# Third Party Stuff
from django import template
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Q
# Spoken Tutorial Stuff
from creation.models import *
from creation.views import (
is_administrator,
is_contenteditor,
is_contributor,
is_domainreviewer,
is_external_contributor,
is_internal_contributor,
is_qualityreviewer,
is_videoreviewer,
is_language_manager
)
from spoken.forms import TutorialSearchForm
register = template.Library()
def format_component_title(name):
    """Turn an underscore_separated identifier into a display title."""
    spaced = name.replace('_', ' ')
    return spaced.capitalize()
def get_url_name(name):
    """Percent-encode *name* for use in a URL path (spaces become '+')."""
    return quote_plus(name)
def get_zip_content(path):
    """Return the list of member names in the zip archive at *path*.

    Returns False when the file cannot be opened or is not a valid zip,
    mirroring the original best-effort contract.
    """
    try:
        # Context manager guarantees the archive handle is closed; the
        # original leaked the open ZipFile object.
        with zipfile.ZipFile(path, 'r') as zf:
            return zf.namelist()
    except (OSError, zipfile.BadZipfile):
        return False
def is_script_available(path):
    """Return True when an HTTP GET of *path* answers with status 200.

    Any network or HTTP failure is treated as "not available".
    """
    try:
        # Bug fix: the original probed the undefined name `script_path`,
        # raising NameError instead of checking the argument. Also,
        # URLError has no `.code`, so the old except handler could itself
        # raise AttributeError.
        code = urlopen(path).code
    except Exception:
        return False
    return int(code) == 200
def get_review_status_list(key):
    """Map a review-status index to its human-readable label."""
    labels = (
        'Pending',
        'Waiting for Admin Review',
        'Waiting for Domain Review',
        'Waiting for Quality Review',
        'Accepted',
        'Need Improvement',
        'Not Required',
    )
    return labels[key]
def get_review_status_class(key):
    """Map a review-status index to its Bootstrap row class."""
    classes = ('danger', 'active', 'warning', 'info', 'success', 'danger', 'success')
    return classes[key]
def get_review_status_symbol(key):
    """Map a review-status index to its Font Awesome icon class string."""
    symbols = (
        'fa fa-1 fa-minus-circle review-pending-upload',
        'fa fa-1 fa-check-circle review-admin-review',
        'fa fa-1 fa-check-circle review-domain-review',
        'fa fa-1 fa-check-circle review-quality-review',
        'fa fa-1 fa-check-circle review-accepted',
        'fa fa-1 fa-times-circle review-pending-upload',
        'fa fa-1 fa-ban review-accepted',
    )
    return symbols[key]
def get_username(key):
    """Resolve a User primary key to the matching Django username."""
    return User.objects.get(pk=key).username
def get_last_video_upload_time(key):
    """Return the creation time of the most recent ContributorLog row for
    the given tutorial resource, falling back to the resource's own
    `updated` timestamp when no log entry exists.
    """
    try:
        # Bug fix: the original fetched a single row and then iterated over
        # it as if it were a queryset, which could never work; any failure
        # silently fell through to the fallback.
        latest = ContributorLog.objects.filter(
            tutorial_resource_id=key.id).order_by('-created')[0]
        return latest.created
    except Exception:
        # No log rows (IndexError) or lookup failure: best-effort fallback.
        return key.updated
def get_component_name(comp):
    """Map a component id (1-6) to its title-cased name; '' when unknown.

    Uses dict.get instead of the original bare `except:` around a key
    lookup, which also swallowed KeyboardInterrupt/SystemExit.
    """
    comps = {
        1: 'Outline',
        2: 'Script',
        3: 'Video',
        4: 'Slides',
        5: 'Codefiles',
        6: 'Assignment',
    }
    return comps.get(comp, '').title()
def get_missing_component_reply(mcid):
    """Build an HTML fragment listing all replies to a missing-component report.

    Reply text and usernames are end-user input, so they are HTML-escaped
    to prevent stored XSS (the original interpolated them verbatim).
    """
    import html

    rows = TutorialMissingComponentReply.objects.filter(missing_component_id=mcid)
    replies = ''
    for row in rows:
        replies += '<p>' + html.escape(row.reply_message) + '<b> -' + \
            html.escape(row.user.username) + '</b></p>'
    if replies:
        replies = '<br /><b>Replies:</b>' + replies
    return replies
def formatismp4(path):
    """Return True when *path* names an mp4 or mov file.

    ** Registered to be used in jinja template **

    Uses the endswith-tuple idiom instead of two manual slice comparisons.
    """
    return path.endswith(('mp4', 'mov'))
def instruction_sheet(foss, lang):
    """Return a path to the FOSS instruction-sheet PDF.

    Non-English: the MEDIA_URL path when the file exists, otherwise the
    filesystem MEDIA_ROOT path is returned as-is (NOTE(review): that looks
    unintended -- there is no fallback to English here; confirm).
    English: the MEDIA_URL path when present, else False.
    """
    rel = 'videos/' + str(foss.id) + '/' + \
        foss.foss.replace(' ', '-') + '-Instruction-Sheet-'
    if lang.name != 'English':
        candidate = settings.MEDIA_ROOT + rel + lang.name + '.pdf'
        if os.path.isfile(candidate):
            candidate = settings.MEDIA_URL + rel + lang.name + '.pdf'
        return candidate
    candidate = settings.MEDIA_ROOT + rel + 'English.pdf'
    if os.path.isfile(candidate):
        return settings.MEDIA_URL + rel + 'English.pdf'
    return False
def installation_sheet(foss, lang):
    """Return a path to the FOSS installation-sheet PDF.

    Non-English: the MEDIA_URL path when the file exists, otherwise the
    filesystem MEDIA_ROOT path is returned as-is (NOTE(review): no English
    fallback on this branch; confirm intended).
    English: the MEDIA_URL path when present, else False.
    """
    rel = 'videos/' + str(foss.id) + '/' + \
        foss.foss.replace(' ', '-') + '-Installation-Sheet-'
    if lang.name != 'English':
        candidate = settings.MEDIA_ROOT + rel + lang.name + '.pdf'
        if os.path.isfile(candidate):
            candidate = settings.MEDIA_URL + rel + lang.name + '.pdf'
        return candidate
    candidate = settings.MEDIA_ROOT + rel + 'English.pdf'
    if os.path.isfile(candidate):
        return settings.MEDIA_URL + rel + 'English.pdf'
    return False
def brochure(foss, lang):
    """Return a path to the FOSS brochure PDF.

    Non-English: the MEDIA_URL path when the file exists, otherwise the
    filesystem MEDIA_ROOT path is returned as-is (NOTE(review): no English
    fallback on this branch; confirm intended).
    English: the MEDIA_URL path when present, else False.
    """
    rel = 'videos/' + str(foss.id) + '/' + \
        foss.foss.replace(' ', '-') + '-Brochure-'
    if lang.name != 'English':
        candidate = settings.MEDIA_ROOT + rel + lang.name + '.pdf'
        if os.path.isfile(candidate):
            candidate = settings.MEDIA_URL + rel + lang.name + '.pdf'
        return candidate
    candidate = settings.MEDIA_ROOT + rel + 'English.pdf'
    if os.path.isfile(candidate):
        return settings.MEDIA_URL + rel + 'English.pdf'
    return False
def get_thumb_path(row, append_str):
    """Build the MEDIA_URL path of a tutorial thumbnail image."""
    return '{0}videos/{1}/{2}/{3}-{4}.png'.format(
        settings.MEDIA_URL, row.foss_id, row.id,
        row.tutorial.replace(' ', '-'), append_str)
def get_srt_path(tr):
    """Return <track> caption markup for a tutorial's SRT files.

    Always offers the English track when it exists on disk; adds the
    native-language track for non-English tutorials.
    """
    data = ''
    base = 'videos/' + str(tr.tutorial_detail.foss_id) + '/' + \
        str(tr.tutorial_detail_id) + '/' + \
        tr.tutorial_detail.tutorial.replace(' ', '-') + '-'
    if os.path.isfile(settings.MEDIA_ROOT + base + 'English.srt'):
        data = '<track kind="captions" src="' + settings.MEDIA_URL + base + \
            'English.srt" srclang="en" label="English"></track>'
    if tr.language.name != 'English':
        # Debug print removed; it leaked file paths to stdout on every render.
        if os.path.isfile(settings.MEDIA_ROOT + base + tr.language.name + '.srt'):
            data += '<track kind="captions" src="' + settings.MEDIA_URL + base + \
                tr.language.name + '.srt" srclang="en" label="' + \
                tr.language.name + '"></track>'
    return data
def get_video_visits(tr):
    """Increment and persist the tutorial's hit counter, returning the new value."""
    tr.hit_count += 1
    tr.save()
    return tr.hit_count
def get_prerequisite(tr, td):
    """Return the relative URL of a prerequisite tutorial.

    Looks for a published (status 1 or 2) resource in the same language
    first, then falls back to English for non-English tutorials; returns
    None when neither exists. Two leftover debug prints were removed.
    """
    try:
        tr_rec = TutorialResource.objects.get(
            Q(status=1) | Q(status=2),
            tutorial_detail=td, language_id=tr.language_id)
        return get_url_name(td.foss.foss) + '/' + \
            get_url_name(td.tutorial) + '/' + tr_rec.language.name
    except Exception:
        pass
    if tr.language.name != 'English':
        try:
            TutorialResource.objects.get(
                Q(status=1) | Q(status=2),
                tutorial_detail=td, language__name='English')
            return get_url_name(td.foss.foss) + '/' + \
                get_url_name(td.tutorial) + '/English'
        except Exception:
            return None
    return None
def get_prerequisite_from_td(td, lang):
    """Return the id of a published TutorialResource for (td, lang).

    Falls back to English when the requested language has none; None if
    neither exists. Bare `except:` clauses were narrowed to `Exception`
    so KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        return TutorialResource.objects.get(
            Q(status=1) | Q(status=2),
            tutorial_detail=td, language_id=lang.id).id
    except Exception:
        pass
    if lang.name != 'English':
        try:
            return TutorialResource.objects.get(
                Q(status=1) | Q(status=2),
                tutorial_detail=td, language__name='English').id
        except Exception:
            pass
    return None
def get_timed_script(script_path, ti
|
MobileWebApps/backend-python-rest-gae
|
lib/rest_framework/tests/test_request.py
|
Python
|
bsd-3-clause
| 13,239
| 0.001057
|
"""
Tests for content parsing, and form-overloaded content parsing.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.handlers.wsgi import WSGIRequest
from django.test import TestCase
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.compat import patterns
from rest_framework.parsers import (
BaseParser,
FormParser,
MultiPartParser,
JSONParser
)
from rest_framework.request import Request, Empty
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
from rest_framework.compat import six
from io import BytesIO
import json
factory = APIRequestFactory()
class PlainTextParser(BaseParser):
    """Parser that hands back the raw request body as a plain string."""

    media_type = 'text/plain'

    def parse(self, stream, media_type=None, parser_context=None):
        """Return the request body unmodified.

        The return value plays the role of the `(data, files)` pair:
        `data` is the raw string, `files` is always `None`.
        """
        return stream.read()
class TestMethodOverloading(TestCase):
    """Method detection, including the two POST method-override hooks."""

    def test_method(self):
        """`request.method` mirrors the underlying Django request."""
        for verb, make in (('GET', factory.get), ('POST', factory.post)):
            self.assertEqual(Request(make('/')).method, verb)

    def test_overloaded_method(self):
        """A reserved form field can rewrite a POST into another method."""
        overloaded = factory.post('/', {api_settings.FORM_METHOD_OVERRIDE: 'DELETE'})
        self.assertEqual(Request(overloaded).method, 'DELETE')

    def test_x_http_method_override_header(self):
        """The X-HTTP-Method-Override header rewrites POST and GET alike."""
        for make in (factory.post, factory.get):
            req = make('/', {'foo': 'bar'}, HTTP_X_HTTP_METHOD_OVERRIDE='DELETE')
            self.assertEqual(Request(req).method, 'DELETE')
class TestContentParsing(TestCase):
    """Exercises request.DATA / request.POST across methods, content types
    and the form-overloaded content-tunnelling mechanism."""

    def test_standard_behaviour_determines_no_content_GET(self):
        """
        Ensure request.DATA returns empty QueryDict for GET request.
        """
        request = Request(factory.get('/'))
        self.assertEqual(request.DATA, {})

    def test_standard_behaviour_determines_no_content_HEAD(self):
        """
        Ensure request.DATA returns empty QueryDict for HEAD request.
        """
        request = Request(factory.head('/'))
        self.assertEqual(request.DATA, {})

    def test_request_DATA_with_form_content(self):
        """
        Ensure request.DATA returns content for POST request with form content.
        """
        data = {'qwerty': 'uiop'}
        request = Request(factory.post('/', data))
        request.parsers = (FormParser(), MultiPartParser())
        self.assertEqual(list(request.DATA.items()), list(data.items()))

    def test_request_DATA_with_text_content(self):
        """
        Ensure request.DATA returns content for POST request with
        non-form content.
        """
        content = six.b('qwerty')
        content_type = 'text/plain'
        request = Request(factory.post('/', content, content_type=content_type))
        request.parsers = (PlainTextParser(),)
        self.assertEqual(request.DATA, content)

    def test_request_POST_with_form_content(self):
        """
        Ensure request.POST returns content for POST request with form content.
        """
        data = {'qwerty': 'uiop'}
        request = Request(factory.post('/', data))
        request.parsers = (FormParser(), MultiPartParser())
        self.assertEqual(list(request.POST.items()), list(data.items()))

    def test_standard_behaviour_determines_form_content_PUT(self):
        """
        Ensure request.DATA returns content for PUT request with form content.
        """
        data = {'qwerty': 'uiop'}
        request = Request(factory.put('/', data))
        request.parsers = (FormParser(), MultiPartParser())
        self.assertEqual(list(request.DATA.items()), list(data.items()))

    def test_standard_behaviour_determines_non_form_content_PUT(self):
        """
        Ensure request.DATA returns content for PUT request with
        non-form content.
        """
        content = six.b('qwerty')
        content_type = 'text/plain'
        request = Request(factory.put('/', content, content_type=content_type))
        request.parsers = (PlainTextParser(), )
        self.assertEqual(request.DATA, content)

    def test_overloaded_behaviour_allows_content_tunnelling(self):
        """
        Ensure request.DATA returns content for overloaded POST request.
        """
        json_data = {'foobar': 'qwerty'}
        content = json.dumps(json_data)
        content_type = 'application/json'
        # The real payload and its content type are smuggled inside two
        # reserved form fields of an ordinary form POST.
        form_data = {
            api_settings.FORM_CONTENT_OVERRIDE: content,
            api_settings.FORM_CONTENTTYPE_OVERRIDE: content_type
        }
        request = Request(factory.post('/', form_data))
        request.parsers = (JSONParser(), )
        self.assertEqual(request.DATA, json_data)

    def test_form_POST_unicode(self):
        """
        JSON POST via default web interface with unicode data
        """
        # Note: environ and other variables here have simplified content compared to real Request
        # The urlencoded body carries the JSON payload (with Cyrillic text)
        # through the _content/_content_type tunnelling fields.
        CONTENT = b'_content_type=application%2Fjson&_content=%7B%22request%22%3A+4%2C+%22firm%22%3A+1%2C+%22text%22%3A+%22%D0%9F%D1%80%D0%B8%D0%B2%D0%B5%D1%82%21%22%7D'
        environ = {
            'REQUEST_METHOD': 'POST',
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'CONTENT_LENGTH': len(CONTENT),
            'wsgi.input': BytesIO(CONTENT),
        }
        wsgi_request = WSGIRequest(environ=environ)
        wsgi_request._load_post_and_files()
        parsers = (JSONParser(), FormParser(), MultiPartParser())
        parser_context = {
            'encoding': 'utf-8',
            'kwargs': {},
            'args': (),
        }
        request = Request(wsgi_request, parsers=parsers, parser_context=parser_context)
        method = request.method
        self.assertEqual(method, 'POST')
        self.assertEqual(request._content_type, 'application/json')
        # Accessing .method triggers parsing of the tunnelled content, but
        # DATA/FILES themselves must still be lazily unevaluated (Empty).
        self.assertEqual(request._stream.getvalue(), b'{"request": 4, "firm": 1, "text": "\xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!"}')
        self.assertEqual(request._data, Empty)
        self.assertEqual(request._files, Empty)
# def test_accessing_post_after_data_form(self):
# """
# Ensures request.POST can be accessed after request.DATA in
# form request.
# """
# data = {'qwerty': 'uiop'}
# request = factory.post('/', data=data)
# self.assertEqual(request.DATA.items(), data.items())
# self.assertEqual(request.POST.items(), data.items())
# def test_accessing_post_after_data_for_json(self):
# """
# Ensures request.POST can be accessed after request.DA
|
TA in
|
# json request.
# """
# data = {'qwerty': 'uiop'}
# content = json.dumps(data)
# content_type = 'application/json'
# parsers = (JSONParser, )
# request = factory.post('/', content, content_type=content_type,
# parsers=parsers)
# self.assertEqual(request.DATA.items(), data.items())
# self.assertEqual(request.POST.items(), [])
# def test_accessing_post_after_data_for_overloaded_json(self):
# """
# Ensures request.POST can be accessed after request.DATA in overloaded
# json request.
# """
# data = {'qwerty': 'uiop'}
# content = json.dumps(data)
# content_type = 'a
|
nth2say/simple_django_blog
|
blog/migrations/0001_initial.py
|
Python
|
mit
| 1,171
| 0.001708
|
# -*- coding: utf-8 -*-
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema: Article and Tag models plus the Article->Tag M2M link.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField()),
                ('pub_date', models.DateTimeField()),
                ('summary', models.CharField(max_length=500)),
            ],
            options={
                'ordering': ('title',),
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=32)),
            ],
        ),
        migrations.AddField(
            model_name='article',
            name='tag',
            field=models.ManyToManyField(to='blog.Tag', blank=True),
        ),
    ]
|
savioabuga/lipame
|
lipame/taskapp/celery.py
|
Python
|
mit
| 903
| 0.005537
|
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
# Fall back to the local settings module when Django has not been
# configured yet (e.g. when the celery worker is launched standalone).
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')  # pragma: no cover

# Single app-wide Celery instance; tasks register against it.
app = Celery('lipame')
class CeleryConfig(AppConfig):
    # Django app config that wires Celery to the project's settings and
    # auto-discovers tasks once the app registry is ready.
    name = 'lipame.taskapp'
    verbose_name = 'Celery Config'

    def ready(self):
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        installed_apps = [app_config.name for app_config in apps.get_app_configs()]
        app.autodiscover_tasks(lambda: installed_apps, force=True)
@app.task(bind=True)
def debug_task(self):
    """Print the raw Celery request; useful when verifying worker wiring."""
    print('Request: %r' % (self.request,))  # pragma: no cover
|
googlefonts/gftools
|
bin/gftools-nametable-from-filename.py
|
Python
|
apache-2.0
| 7,889
| 0.013056
|
#!/usr/bin/env python3
# Copyright 2013,2016 The Font Bakery Authors.
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
"""
Replace a collection of fonts nametable's with new tables based on
the Google Fonts naming spec from just the filename.
The fsSelection, fsType and macStyle also get updated
to reflect the new names.
"""
from __future__ import print_function
import re
import ntpath
from argparse import (ArgumentParser,
RawTextHelpFormatter)
from fontTools.ttLib import TTFont, newTable
# Styles that may appear directly in the Windows family/subfamily fields;
# anything else is folded into the family name (and IDs 16/17 are added).
WIN_SAFE_STYLES = [
    'Regular',
    'Bold',
    'Italic',
    'BoldItalic',
]

# head.macStyle bit values keyed by the (spaced) style name.
MACSTYLE = {
    'Regular': 0,
    'Bold': 1,
    'Italic': 2,
    'Bold Italic': 3
}

# Weight name to value mapping:
WEIGHTS = {
    "Thin": 250,
    "ExtraLight": 275,
    "Light": 300,
    "Regular": 400,
    "Italic": 400,
    "Medium": 500,
    "SemiBold": 600,
    "Bold": 700,
    "ExtraBold": 800,
    "Black": 900
}

# (nameID, platformID, platEncID, langID) records every font must carry:
# Mac Roman (1, 0, 0) and Windows Unicode BMP US-English (3, 1, 1033).
# NOTE(review): (1, 3, 1, 1033) appears twice -- looks like a harmless
# duplicate; confirm before removing.
REQUIRED_FIELDS = [
    (0, 1, 0, 0),
    (1, 1, 0, 0),
    (2, 1, 0, 0),
    (3, 1, 0, 0),
    (4, 1, 0, 0),
    (5, 1, 0, 0),
    (6, 1, 0, 0),
    (7, 1, 0, 0),
    (8, 1, 0, 0),
    (9, 1, 0, 0),
    (11, 1, 0, 0),
    (12, 1, 0, 0),
    (13, 1, 0, 0),
    (14, 1, 0, 0),
    (0, 3, 1, 1033),
    (1, 3, 1, 1033),
    (1, 3, 1, 1033),
    (2, 3, 1, 1033),
    (3, 3, 1, 1033),
    (4, 3, 1, 1033),
    (5, 3, 1, 1033),
    (6, 3, 1, 1033),
    (7, 3, 1, 1033),
    (8, 3, 1, 1033),
    (9, 3, 1, 1033),
    (11, 3, 1, 1033),
    (12, 3, 1, 1033),
    (13, 3, 1, 1033),
    (14, 3, 1, 1033),
]
def _split_camelcase(text):
return re.sub(r"(?<=\w)([A-Z])", r" \1", text)
def _mac_subfamily_name(style_name):
if style_name.startswith('Italic'):
pass
elif 'Italic' in style_name:
style_name = style_name.replace('Italic', ' Italic')
return style_name
def _unique_id(version, vendor_id, filename):
# Glyphsapp style 2.000;MYFO;Arsenal-Bold
# version;vendorID;filename
return '%s;%s;%s' % (version, vendor_id, filename)
def _version(text):
return re.search(r'[0-9]{1,4}\.[0-9]{1,8}', text).group(0)
def _full_name(family_name, style_name):
    """Join the family and the spaced subfamily into the full font name."""
    return '%s %s' % (family_name, _mac_subfamily_name(style_name))
def _win_family_name(family_name, style_name):
    """Build the Windows family name (name ID 1).

    Styles outside WIN_SAFE_STYLES are folded into the family name, with
    any 'Italic' token stripped out afterwards.
    NOTE(review): stripping 'Italic' can leave a trailing space
    (e.g. 'Family Light Italic' -> 'Family Light ') -- confirm intended.
    """
    name = family_name
    if style_name not in WIN_SAFE_STYLES:
        name = '%s %s' % (family_name, style_name)
    if 'Italic' in name:
        name = re.sub(r'Italic', r'', name)
    return name
def _win_subfamily_name(style_name):
name = style_name
if 'BoldItalic' == name:
retu
|
rn 'Bold Italic'
elif 'Italic' in name:
return 'Italic'
elif name == 'Bold':
return 'Bold'
else:
return 'Regular'
def set_usWeightClass(style_name):
    """Look up the OS/2 usWeightClass value for a style name.

    'Italic' maps directly (400); other italic styles are looked up by
    their weight component ('BoldItalic' -> 'Bold' -> 700).
    """
    key = style_name if style_name == 'Italic' else re.sub(r'Italic', r'', style_name)
    return WEIGHTS[key]
def set_macStyle(style_name):
    """Look up the head.macStyle bits for a (spaced) style name."""
    return MACSTYLE[style_name]
def set_fsSelection(fsSelection, style):
    """Recompute the OS/2 fsSelection bits for *style*.

    Bit 6 (REGULAR) tracks 'Regular', bit 5 (BOLD) the two bold styles,
    bit 0 (ITALIC) any italic; a zero result falls back to REGULAR.
    """
    REGULAR, BOLD, ITALIC = 0b1000000, 0b100000, 0b1
    bits = fsSelection
    bits = bits | REGULAR if 'Regular' in style else bits & ~REGULAR
    bits = bits | BOLD if style in ('Bold', 'BoldItalic') else bits & ~BOLD
    bits = bits | ITALIC if 'Italic' in style else bits & ~ITALIC
    return bits if bits else REGULAR
def nametable_from_filename(filepath):
    """Generate a new nametable based on a ttf and the GF Spec.

    Family and style are derived from the filename (``Family-Style.ttf``).
    Mac (1, 0, 0) and Windows (3, 1, 1033) records are written, name IDs
    16/17 are added for styles Windows cannot express, and any remaining
    REQUIRED_FIELDS are padded across from the old table.
    """
    font = TTFont(filepath)
    old_table = font['name']
    new_table = newTable('name')
    filename = ntpath.basename(filepath)[:-4]
    family_name, style_name = filename.split('-')
    family_name = _split_camelcase(family_name)
    font_version = font['name'].getName(5, 3, 1, 1033)
    font_version = font_version.toUnicode()
    vendor_id = font['OS/2'].achVendID
    # SET MAC NAME FIELDS
    # -------------------
    # Copyright
    old_cp = old_table.getName(0, 3, 1, 1033).string.decode('utf_16_be')
    new_table.setName(old_cp.encode('mac_roman'), 0, 1, 0, 0)
    # Font Family Name
    new_table.setName(family_name.encode('mac_roman'), 1, 1, 0, 0)
    # Subfamily name
    mac_subfamily_name = _mac_subfamily_name(style_name).encode('mac_roman')
    new_table.setName(mac_subfamily_name, 2, 1, 0, 0)
    # Unique ID
    unique_id = _unique_id(_version(font_version), vendor_id, filename)
    mac_unique_id = unique_id.encode('mac_roman')
    new_table.setName(mac_unique_id, 3, 1, 0, 0)
    # Full name
    fullname = _full_name(family_name, style_name)
    mac_fullname = fullname.encode('mac_roman')
    new_table.setName(mac_fullname, 4, 1, 0, 0)
    # Version string
    old_v = old_table.getName(5, 3, 1, 1033).string.decode('utf_16_be')
    mac_old_v = old_v.encode('mac_roman')
    new_table.setName(mac_old_v, 5, 1, 0, 0)
    # Postscript name
    mac_ps_name = filename.encode('mac_roman')
    new_table.setName(mac_ps_name, 6, 1, 0, 0)
    # SET WIN NAME FIELDS
    # -------------------
    # Copyright
    new_table.setName(old_cp, 0, 3, 1, 1033)
    # Font Family Name
    win_family_name = _win_family_name(family_name, style_name)
    win_family_name = win_family_name.encode('utf_16_be')
    new_table.setName(win_family_name, 1, 3, 1, 1033)
    # Subfamily Name
    win_subfamily_name = _win_subfamily_name(style_name).encode('utf_16_be')
    new_table.setName(win_subfamily_name, 2, 3, 1, 1033)
    # Unique ID
    win_unique_id = unique_id.encode('utf_16_be')
    new_table.setName(win_unique_id, 3, 3, 1, 1033)
    # Full name
    win_fullname = fullname.encode('utf_16_be')
    new_table.setName(win_fullname, 4, 3, 1, 1033)
    # Version string
    win_old_v = old_v.encode('utf_16_be')
    new_table.setName(win_old_v, 5, 3, 1, 1033)
    # Postscript name
    win_ps_name = filename.encode('utf_16_be')
    new_table.setName(win_ps_name, 6, 3, 1, 1033)
    if style_name not in WIN_SAFE_STYLES:
        # Preferred Family Name
        new_table.setName(family_name.encode('utf_16_be'), 16, 3, 1, 1033)
        # Preferred SubfamilyName
        win_pref_subfam_name = _mac_subfamily_name(style_name).encode('utf_16_be')
        new_table.setName(win_pref_subfam_name, 17, 3, 1, 1033)
    # PAD missing fields
    # ------------------
    for field in REQUIRED_FIELDS:
        text = None
        if new_table.getName(*field):
            pass  # Name has already been updated
        elif old_table.getName(*field):
            text = old_table.getName(*field).string
        elif old_table.getName(field[0], 3, 1, 1033):
            text = old_table.getName(field[0], 3, 1, 1033).string.decode('utf_16_be')
        elif old_table.getName(field[0], 1, 0, 0):  # check if field exists for mac
            # Bug fix: the original re-read the *Windows* record here (which
            # is None on this branch, so .string raised AttributeError) and
            # decoded it as mac_roman. Read the Mac record that the elif
            # condition just confirmed exists.
            text = old_table.getName(field[0], 1, 0, 0).string.decode('mac_roman')
        if text:
            new_table.setName(text, *field)
    return new_table
# Command-line interface; RawTextHelpFormatter preserves the layout of the
# module docstring used as the help text.
parser = ArgumentParser(description=__doc__,
                        formatter_class=RawTextHelpFormatter)
parser.add_argument('fonts', nargs="+")
def main():
    """Rewrite each font's nametable plus the OS/2 and head style fields."""
    args = parser.parse_args()
    for path in args.fonts:
        new_names = nametable_from_filename(path)
        ttf = TTFont(path)
        filename = ntpath.basename(path)
        ttf['name'] = new_names
        style = filename[:-4].split('-')[-1]
        ttf['OS/2'].usWeightClass = set_usWeightClass(style)
        ttf['OS/2'].fsSelection = set_fsSelection(ttf['OS/2'].fsSelection, style)
        win_style = ttf['name'].getName(2, 3, 1, 1033).string.decode('utf_16_be')
        ttf['head'].macStyle = set_macStyle(win_style)
        ttf.save(path + '.fix')
        print('font saved %s.fix' % path)


if __name__ == '__main__':
    main()
|
mic4ael/indico
|
indico/modules/groups/__init__.py
|
Python
|
mit
| 889
| 0.001125
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode
|
_literals
from flask import session
from indico.core import signals
from indico.modules.groups.core import GroupProxy
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem
__all__ = ('GroupProxy',)
@signals.menu.items.connect_via('admin-sidemenu')
def _extend_admin_menu(sender, **kwargs):
    # Expose the "Groups" admin page, but only to Indico admins; returning
    # None (non-admin) simply adds no menu item.
    if session.user.is_admin:
        return SideMenuItem('groups', _("Groups"), url_for('groups.groups'), section='user_management')
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
    # Move every local group membership from the merged-away user onto the
    # surviving one, then drop the source's memberships.
    target.local_groups |= source.local_groups
    source.local_groups.clear()
|
ystk/debian-audit
|
system-config-audit/src/dialog_base.py
|
Python
|
gpl-2.0
| 5,208
| 0.000768
|
# Common dialog code.
#
# Copyright (C) 2007, 2008 Red Hat, Inc. All rights reserved.
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program; if
# not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Red Hat Author: Miloslav Trmac <mitr@redhat.com>
import os
import gtk.glade
import settings
__all__ = ('DialogBase')
class DialogBase(object):
    '''Commmon utilities for dialogs.'''

    def __init__(self, toplevel_name, parent, notebook_name = None):
        '''Create a window from the glade file and get references to widgets.

        If notebook_name is not None, use it in validate_values().  Make the
        window transient for parent.
        '''
        glade_xml = gtk.glade.XML(settings.glade_file_path, toplevel_name)
        # Each subclass declares _glade_widget_names; expose every widget as
        # an attribute of the same name.
        for name in self._glade_widget_names:
            w = glade_xml.get_widget(name)
            assert w is not None, 'Widget %s not found in glade file' % name
            setattr(self, name, w)
        # This name is special :)
        self.window = glade_xml.get_widget(toplevel_name)
        if parent is not None:
            self.window.set_transient_for(parent)
        if notebook_name is None:
            self.__notebook_widget = None
        else:
            self.__notebook_widget = glade_xml.get_widget(notebook_name)
            assert self.__notebook_widget is not None

    def destroy(self):
        '''Destroy the dialog.'''
        self.window.destroy()

    def _validate_get_failure(self):
        '''Check whether the window state is a valid configuration.

        Return None if it is valid.  Otherwise, return (message, notebook page
        index or None, widget).
        '''
        raise NotImplementedError()

    def _validate_values(self):
        '''Check whether the dialog state is a valid configuration.

        Return True if it is valid.  Otherwise, display an error message and
        return False.
        '''
        a = self._validate_get_failure()
        if a is None:
            return True
        (msg, page, widget) = a
        # Jump to the offending notebook page (if any) before showing the
        # error, so the focused widget is actually visible.
        if self.__notebook_widget is not None:
            self.__notebook_widget.set_current_page(page)
        self._modal_error_dialog(msg)
        widget.grab_focus()
        return False

    def _modal_error_dialog(self, msg):
        '''Show a modal error dialog.'''
        dlg = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT,
                                gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, msg)
        dlg.run()
        dlg.destroy()

    def _radio_set(self, value, pairs):
        '''Update the "active" state of several toggle buttons.

        The pairs parameter is a tuple of (widget name, expected value) pairs.
        Expected value is either a single value, or a tuple of possible values.
        '''
        for (name, expected) in pairs:
            if type(expected) == tuple:
                active = value in expected
            else:
                active = value == expected
            getattr(self, name).set_active(active)

    def _radio_get(self, pairs):
        '''Get the "active" button from a group of radio buttons.

        The pairs parameter is a tuple of (widget name, return value) pairs.
        If no widget is active, an assertion will fail.
        '''
        for (name, value) in pairs:
            if getattr(self, name).get_active():
                return value
        assert False, 'No widget is active'

    def _setup_browse_button(self, button, entry, title, action):
        '''Set up a "Browse" button for a path entry.'''
        button.connect('clicked', self.__browse_button_clicked, entry, title,
                       action)

    def __browse_button_clicked(self, unused, entry, title, action):
        # Pick the accept-button stock label to match the chooser action.
        if action == gtk.FILE_CHOOSER_ACTION_SAVE:
            stock_accept = gtk.STOCK_SAVE
        else:
            stock_accept = gtk.STOCK_OPEN
        dlg = gtk.FileChooserDialog(title, self.window, action,
                                    (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                     stock_accept, gtk.RESPONSE_ACCEPT))
        path = entry.get_text()
        # For save dialogs, seed both the folder and the proposed file name;
        # for open dialogs, preselect the existing file.
        if action == gtk.FILE_CHOOSER_ACTION_SAVE:
            dlg.set_current_folder(os.path.dirname(path))
            dlg.set_current_name(os.path.basename(path))
        else:
            dlg.set_filename(path)
        r = dlg.run()
        if r == gtk.RESPONSE_ACCEPT:
            entry.set_text(dlg.get_filename())
        dlg.destroy()
|
thyagostall/apollo
|
src/problem.py
|
Python
|
mit
| 6,320
| 0.004589
|
from enum import Enum
from collections import namedtuple
import settings
import os
import dbaccess
import urllib.request
import ast
import shutil
def move_file(filename, src, dest):
    """Move *filename* from directory *src* to directory *dest*."""
    shutil.move(os.path.join(src, filename), os.path.join(dest, filename))
def create_file(filename, path):
    """Create an empty file named *filename* inside directory *path*."""
    full = os.path.join(path, filename)
    # `with` guarantees the handle is closed even if later code raises;
    # the original relied on a manual close().
    with open(full, 'w+'):
        pass
def delete_file(filename, path):
    """Remove *filename* from directory *path*."""
    os.remove(os.path.join(path, filename))
def delete_directory(directory):
    """Remove an (empty) directory."""
    os.rmdir(directory)
def get_problem_name(number):
    """Fetch the title of UVa problem *number* from the uHunt API."""
    import json

    url = "http://uhunt.felix-halim.net/api/p/id/{0}".format(number)
    with urllib.request.urlopen(url) as response:
        payload = response.read()
    # json.loads replaces ast.literal_eval: the API returns JSON, whose
    # true/false/null literals literal_eval cannot parse.
    return json.loads(payload.decode('utf-8'))["title"]
class Language(Enum):
    """Programming languages a solution can be written in."""
    C = 0
    CPP = 1
    JAVA = 2
    PYTHON = 3
# Source-file extension used for each language.
language_extensions = {
    Language.C: 'c',
    Language.CPP: 'cpp',
    Language.JAVA: 'java',
    Language.PYTHON: 'py'}
class Status(Enum):
    """Lifecycle states of a problem attempt."""
    TEMPORARY = 0
    WORKING = 1
    PAUSED = 2
    FINISHED = 3
    ARCHIVED = 4

    @staticmethod
    def get_directory(status):
        """Return the repository directory name for the given state."""
        return status.name.lower()
class ProblemData(object):
    """Value object describing one attempt at a judge problem.

    Equality is defined by the (problem_id, attempt_no) pair.
    """

    def __init__(self, problem_id, name, category_id=None):
        self.problem_id = problem_id
        self.name = name
        self.category_id = category_id
        self.language = None     # Language enum member, set later
        self.attempt_no = None
        self.status = None       # Status enum member, set later
        self.source_file = None
        self.input_file = None
        self.output_file = None

    def __eq__(self, other):
        if other is None:
            return False
        return (self.problem_id == other.problem_id
                and self.attempt_no == other.attempt_no)

    def __ne__(self, other):
        # Bug fix: the original mixed `and`/`or` without parentheses, so
        # `a != b` evaluated `(other and id-differs) or attempt-differs`.
        # Defining __ne__ as the negation keeps both operators in sync.
        return not self.__eq__(other)
class ProblemNotFound(Exception):
    """Raised when a (problem_id, attempt_no) pair is not in the database."""
    pass
class ProblemManager(object):
def __get_problem_from_db(self, problem_id):
result = dbaccess.read('problem', where={'id': problem_id})
if result:
result = result[0]
problem = ProblemData(result[0], result[1], result[2])
return problem
else:
return None
def create_files(self, problem):
path = os.path.join(settings.get('repo_path'), Status.get_directory(problem.status))
create_file(problem.source_file, path)
create_file(problem.input_file, path)
create_file(problem.output_file, path)
    def create_data(self, problem):
        # Insert the problem row on the first attempt only, then record a
        # new attempt numbered one past the current attempt count.
        result = dbaccess.read('problem', where={'id': problem.problem_id})
        if not result:
            dbaccess.insert('problem', data={'id': problem.problem_id,
                'name': problem.name, 'category_id': problem.category_id})
        result = dbaccess.read('problem_attempt', where={'problem_id': problem.problem_id})
        attempt_no = len(result)
        attempt_no += 1
        dbaccess.insert('problem_attempt',
                data={'problem_id': problem.problem_id, 'attempt_no': attempt_no,
                    'language_id': problem.language.value, 'status_id': problem.status.value})
def delete_files(self, problem):
path = os.path.join(settings.get('repo_path'), Status.get_directory(problem.status))
delete_file(problem.source_file, path)
delete_file(problem.input_file, path)
delete_file(problem.output_file, path)
def delete_data(self, problem):
dbaccess.delete('problem_attempt',
where={'problem_id': problem.problem_id,
'attempt_no': problem.attempt_no})
result = dbaccess.read('problem_attempt',
where={'problem_id': problem.problem_id})
if not result:
dbaccess.delete('problem', where={'id': problem.problem_id})
def set_status(self, status, problem):
src_dir = Status.get_directory(problem.status)
src_dir = os.path.join(settings.get('repo_path'), src_dir)
problem.status = status
dest_dir = Status.get_directory(problem.status)
dest_dir = os.path.join(settings.get('repo_path'), dest_dir)
move_file(problem.source_file, src_dir, dest_dir)
move_file(problem.input_file, src_dir, des
|
t_dir)
|
move_file(problem.output_file, src_dir, dest_dir)
dbaccess.update(
'problem_attempt',
data={'status_id': problem.status.value},
where={'problem_id': problem.problem_id,
'attempt_no': problem.attempt_no})
def get_data_for_new(self, problem_id, language):
problem = self.__get_problem_from_db(problem_id)
if not problem:
name = get_problem_name(problem_id)
problem = ProblemData(problem_id, name, None)
problem.language = language
result = dbaccess.read('problem_attempt', where={'problem_id': problem_id})
problem.attempt_no = len(result) + 1
problem.status = Status.TEMPORARY
prefix = str(problem_id) + '.'
problem.source_file = prefix + language_extensions[language]
problem.input_file = prefix + 'in'
problem.output_file = prefix + 'out'
return problem
def get_data(self, problem_id, attempt_no):
problem = self.__get_problem_from_db(problem_id)
result = dbaccess.read('problem_attempt',
columns=['status_id', 'language_id'],
where={'problem_id': problem_id, 'attempt_no': attempt_no})
if not result:
message = ' '.join(['Problem:', str(problem_id), 'was not found on the database.'])
raise ProblemNotFound(message)
problem.attempt_no = attempt_no
problem.status = Status(result[0][0])
problem.language = Language(result[0][1])
prefix = str(problem_id) + '.'
problem.source_file = prefix + language_extensions[problem.language]
problem.input_file = prefix + 'in'
problem.output_file = prefix + 'out'
return problem
def update_category(self, problem):
dbaccess.update('problem', data={'category_id': problem.category_id}, where={'id': problem.problem_id})
|
gwu-libraries/vivo2notld
|
vivo2notld/definitions/organization_summary.py
|
Python
|
mit
| 157
| 0.006369
|
# Query definition for a summary of a foaf:Organization: selects subjects
# typed foaf:Organization and fills the "name" field from rdfs:label.
definition = {
    "where": "?subj a foaf:Organization .",
    "fields": {
        "name": {
            "where": "?subj rdfs:label ?obj ."
        }
    }
}
|
tombstone/models
|
official/vision/detection/modeling/architecture/nn_blocks.py
|
Python
|
apache-2.0
| 11,423
| 0.002626
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common building blocks for neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock(tf.keras.layers.Layer):
  """A basic residual block: two 3x3 convolutions plus a shortcut add."""

  def __init__(self,
               filters,
               strides,
               use_projection=False,
               kernel_initializer='VarianceScaling',
               kernel_regularizer=None,
               bias_regularizer=None,
               activation='relu',
               use_sync_bn=False,
               norm_momentum=0.99,
               norm_epsilon=0.001,
               **kwargs):
    """A residual block with BN after convolutions.

    Args:
      filters: `int` number of filters used by both 3x3 convolutions (and
        by the optional 1x1 projection shortcut).
      strides: `int` block stride. If greater than 1, this block will
        ultimately downsample the input.
      use_projection: `bool` for whether this block should use a projection
        shortcut (versus the default identity shortcut). This is usually
        `True` for the first block of a block group, which may change the
        number of filters and the resolution.
      kernel_initializer: kernel_initializer for convolutional layers.
      kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
        Default to None.
      bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
        Default to None.
      activation: `str` name of the activation function.
      use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
      norm_epsilon: `float` small float added to variance to avoid dividing
        by zero.
      **kwargs: keyword arguments to be passed.
    """
    super(ResidualBlock, self).__init__(**kwargs)

    self._filters = filters
    self._strides = strides
    self._use_projection = use_projection
    self._use_sync_bn = use_sync_bn
    self._activation = activation
    self._kernel_initializer = kernel_initializer
    self._norm_momentum = norm_momentum
    self._norm_epsilon = norm_epsilon
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer

    if use_sync_bn:
      self._norm = tf.keras.layers.experimental.SyncBatchNormalization
    else:
      self._norm = tf.keras.layers.BatchNormalization
    # BatchNorm axis follows the image data format: channels last -> -1,
    # channels first -> 1.
    if tf.keras.backend.image_data_format() == 'channels_last':
      self._bn_axis = -1
    else:
      self._bn_axis = 1
    self._activation_fn = tf_utils.get_activation(activation)

  def build(self, input_shape):
    if self._use_projection:
      # 1x1 projection so the shortcut matches the main path's channel
      # count and spatial resolution.
      self._shortcut = tf.keras.layers.Conv2D(
          filters=self._filters,
          kernel_size=1,
          strides=self._strides,
          use_bias=False,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          bias_regularizer=self._bias_regularizer)
      self._norm0 = self._norm(
          axis=self._bn_axis,
          momentum=self._norm_momentum,
          epsilon=self._norm_epsilon)

    self._conv1 = tf.keras.layers.Conv2D(
        filters=self._filters,
        kernel_size=3,
        strides=self._strides,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm1 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    # Second conv never downsamples; only _conv1 carries the block stride.
    self._conv2 = tf.keras.layers.Conv2D(
        filters=self._filters,
        kernel_size=3,
        strides=1,
        padding='same',
        use_bias=False,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        bias_regularizer=self._bias_regularizer)
    self._norm2 = self._norm(
        axis=self._bn_axis,
        momentum=self._norm_momentum,
        epsilon=self._norm_epsilon)

    super(ResidualBlock, self).build(input_shape)

  def get_config(self):
    # Everything needed to re-create the layer through keras serialization.
    config = {
        'filters': self._filters,
        'strides': self._strides,
        'use_projection': self._use_projection,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activation': self._activation,
        'use_sync_bn': self._use_sync_bn,
        'norm_momentum': self._norm_momentum,
        'norm_epsilon': self._norm_epsilon
    }
    base_config = super(ResidualBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    shortcut = inputs
    if self._use_projection:
      shortcut = self._shortcut(shortcut)
      shortcut = self._norm0(shortcut)

    x = self._conv1(inputs)
    x = self._norm1(x)
    x = self._activation_fn(x)

    x = self._conv2(x)
    x = self._norm2(x)

    # Residual connection; activation is applied after the add.
    return self._activation_fn(x + shortcut)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock(tf.keras.layers.Layer):
"""A standard bottleneck block."""
def __init__(self,
filters,
strides,
use_projection=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A standard bottleneck block with BN after convolutions.
Args:
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
norm_momentum: `float` normalization omentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(BottleneckBlock, self).__init__(**kwargs)
self._filters = filters
self._strides = strides
self._use_projection = use_projection
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm =
|
51reboot/actual_09_homework
|
07/zhaoyuanhai/cmdb/loganalysis.py
|
Python
|
mit
| 1,002
| 0.003018
|
#encoding:utf-8
def get_topn(logfile, topn=10):
    """Return the *topn* most frequent (ip, url, status_code) triples.

    Each log line is whitespace-split; field 0 is the client IP, field 6
    the request URL and field 8 the HTTP status code (Apache/Nginx
    combined-log layout).

    Returns a list of ((ip, url, code), count) tuples, most frequent first.
    Fewer than *topn* entries are returned when the log has fewer distinct
    triples.
    """
    import heapq
    counts = {}
    fhandler = open(logfile, 'r')
    try:
        # Iterate the file directly instead of the readline/break loop;
        # this also streams instead of loading the whole file.
        for line in fhandler:
            nodes = line.split()
            if len(nodes) < 9:
                # Skip blank/malformed lines (the original raised IndexError).
                continue
            key = (nodes[0], nodes[6], nodes[8])
            counts[key] = counts.get(key, 0) + 1
    finally:
        # Close even if parsing fails part-way through.
        fhandler.close()
    # O(n log topn) selection; replaces the hand-rolled partial bubble sort
    # (which also indexed dict.items(), breaking on Python 3).
    return heapq.nlargest(topn, counts.items(), key=lambda kv: kv[1])
if __name__ == '__main__':
    # Ad-hoc smoke run: print the 5 most frequent entries of a sample log.
    # NOTE: `print` statement (not function) -- this module targets Python 2.
    logfile = '/home/share/www_access_20140823.log'
    print get_topn(topn=5, logfile=logfile)
|
Rub4ek/scalors-assignment-backend
|
reminder/tasks.py
|
Python
|
mit
| 447
| 0
|
from django.core.mail import send_mail
from greatesttodo.celery import app
from reminder.models import Reminder
@app.task
def send_email_reminder(reminder_id):
    """Celery task: e-mail a reminder's text to its recipient.

    Looks the Reminder up by primary key and sends its text as both subject
    and message body.  If the row has been deleted by the time the task
    runs, the task silently does nothing.
    """
    try:
        reminder = Reminder.objects.get(id=reminder_id)
        send_mail(
            subject=reminder.text,
            message=reminder.text,
            from_email=None,  # None -> Django uses DEFAULT_FROM_EMAIL
            recipient_list=[reminder.email]
        )
    except Reminder.DoesNotExist:
        # Deliberate best-effort: a deleted reminder needs no e-mail.
        pass
| |
gwr/samba
|
source4/scripting/python/samba/__init__.py
|
Python
|
gpl-3.0
| 11,696
| 0.002137
|
#!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
#
# Based on the original in EJS:
# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Samba 4."""
__docformat__ = "restructuredText"
import os
import sys
import samba.param
def source_tree_topdir():
    """Return the top level directory (the one containing the source4
    directory).

    Raises:
        RuntimeError: when neither candidate ancestor contains 'source4'.
    """
    here = os.path.dirname(__file__)
    for relative in ("../../..", "../../../.."):
        candidate = os.path.normpath(os.path.join(here, relative))
        if os.path.exists(os.path.join(candidate, 'source4')):
            return candidate
    raise RuntimeError("unable to find top level source directory")
def in_source_tree():
    """Return True if we are running from within the samba source tree."""
    try:
        source_tree_topdir()
    except RuntimeError:
        return False
    return True
import ldb
from samba._ldb import Ldb as _Ldb
class Ldb(_Ldb):
"""Simple Samba-specific LDB subclass that takes care
of setting up the modules dir, credentials pointers, etc.
Please note that this is intended to be for all Samba LDB files,
not necessarily the Sam database. For Sam-specific helper
functions see samdb.py.
"""
def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
credentials=None, flags=0, options=None):
"""Opens a Samba Ldb file.
:param url: Optional LDB URL to open
:param lp: Optional loadparm object
:param
|
modules_dir: Optional modules directory
:param session_info: Optional session information
:param credentials: Optional credentials, defaults to anonymous.
|
:param flags: Optional LDB flags
:param options: Additional options (optional)
This is different from a regular Ldb file in that the Samba-specific
modules-dir is used by default and that credentials and session_info
can be passed through (required by some modules).
"""
if modules_dir is not None:
self.set_modules_dir(modules_dir)
else:
self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb"))
if session_info is not None:
self.set_session_info(session_info)
if credentials is not None:
self.set_credentials(credentials)
if lp is not None:
self.set_loadparm(lp)
# This must be done before we load the schema, as these handlers for
# objectSid and objectGUID etc must take precedence over the 'binary
# attribute' declaration in the schema
self.register_samba_handlers()
# TODO set debug
def msg(l, text):
print text
#self.set_debug(msg)
self.set_utf8_casefold()
# Allow admins to force non-sync ldb for all databases
if lp is not None:
nosync_p = lp.get("nosync", "ldb")
if nosync_p is not None and nosync_p == True:
flags |= ldb.FLG_NOSYNC
self.set_create_perms(0600)
if url is not None:
self.connect(url, flags, options)
def searchone(self, attribute, basedn=None, expression=None,
scope=ldb.SCOPE_BASE):
"""Search for one attribute as a string.
:param basedn: BaseDN for the search.
:param attribute: Name of the attribute
:param expression: Optional search expression.
:param scope: Search scope (defaults to base).
:return: Value of attribute as a string or None if it wasn't found.
"""
res = self.search(basedn, scope, expression, [attribute])
if len(res) != 1 or res[0][attribute] is None:
return None
values = set(res[0][attribute])
assert len(values) == 1
return self.schema_format_value(attribute, values.pop())
def erase_users_computers(self, dn):
"""Erases user and computer objects from our AD.
This is needed since the 'samldb' module denies the deletion of primary
groups. Therefore all groups shouldn't be primary somewhere anymore.
"""
try:
res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[],
expression="(|(objectclass=user)(objectclass=computer))")
except ldb.LdbError, (errno, _):
if errno == ldb.ERR_NO_SUCH_OBJECT:
# Ignore no such object errors
return
else:
raise
try:
for msg in res:
self.delete(msg.dn, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore no such object errors
raise
def erase_except_schema_controlled(self):
"""Erase this ldb.
:note: Removes all records, except those that are controlled by
Samba4's schema.
"""
basedn = ""
# Try to delete user/computer accounts to allow deletion of groups
self.erase_users_computers(basedn)
# Delete the 'visible' records, and the invisble 'deleted' records (if this DB supports it)
for msg in self.search(basedn, ldb.SCOPE_SUBTREE,
"(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
[], controls=["show_deleted:0", "show_recycled:0"]):
try:
self.delete(msg.dn, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore no such object errors
raise
res = self.search(basedn, ldb.SCOPE_SUBTREE,
"(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))", [], controls=["show_deleted:0", "show_recycled:0"])
assert len(res) == 0
# delete the specials
for attr in ["@SUBCLASSES", "@MODULES",
"@OPTIONS", "@PARTITION", "@KLUDGEACL"]:
try:
self.delete(attr, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore missing dn errors
raise
def erase(self):
"""Erase this ldb, removing all records."""
self.erase_except_schema_controlled()
# delete the specials
for attr in ["@INDEXLIST", "@ATTRIBUTES"]:
try:
self.delete(attr, ["relax:0"])
except ldb.LdbError, (errno, _):
if errno != ldb.ERR_NO_SUCH_OBJECT:
# Ignore missing dn errors
raise
def load_ldif_file_add(self, ldif_path):
"""Load a LDIF file.
:param ldif_path: Path to LDIF file.
"""
self.add_ldif(open(ldif_path, 'r').read())
def add_ldif(self, ldif, controls=None):
"""Add data based on a LDIF string.
:param ldif: LDIF text.
"""
for changetype, msg in self.parse_ldif(ldif):
assert changetype == ldb.CHANGETYPE_NONE
self.add(msg, controls)
def modify_ldif(self, ldif, controls=None):
"""Modify database based on a LDIF string.
:param ldif: LDIF text.
"""
for changetype, msg in self.parse_ldif(ldif):
if changetype == ldb.CHANGETYPE_ADD:
self.add(msg, controls)
else:
self.modify(msg, controls)
de
|
yahman72/robotframework
|
utest/running/test_handlers.py
|
Python
|
apache-2.0
| 12,868
| 0.001399
|
import unittest
import sys
import inspect
from robot.running.handlers import _PythonHandler, _JavaHandler, DynamicHandler
from robot import utils
from robot.utils.asserts import *
from robot.running.testlibraries import TestLibrary
from robot.running.dynamicmethods import (
GetKeywordArguments, GetKeywordDocumentation, RunKeyword)
from robot.errors import DataError
from classes import NameLibrary, DocLibrary, ArgInfoLibrary
from ArgumentsPython import ArgumentsPython
if utils.JYTHON:
import ArgumentsJava
def _get_handler_methods(lib):
attrs = [getattr(lib, a) for a in dir(lib) if not a.startswith('_')]
return [a for a in attrs if inspect.ismethod(a)]
def _get_java_handler_methods(lib):
    # This hack assumes that all java handlers used start with 'a_' -- easier
    # than excluding 'equals' etc. otherwise
    methods = _get_handler_methods(lib)
    return [method for method in methods if method.__name__.startswith('a_')]
class LibraryMock:
    """Minimal stand-in for a test-library object used by handler tests."""

    def __init__(self, name='MyLibrary', scope='GLOBAL'):
        self.name = name
        self.orig_name = name
        self.scope = scope
class TestPythonHandler(unittest.TestCase):
    """Tests for _PythonHandler metadata: names, docs and argument specs."""

    def test_name(self):
        # NameLibrary methods carry the expected handler name in __doc__.
        for method in _get_handler_methods(NameLibrary()):
            handler = _PythonHandler(LibraryMock('mylib'), method.__name__, method)
            assert_equals(handler.name, method.__doc__)
            assert_equals(handler.longname, 'mylib.' + method.__doc__)

    def test_docs(self):
        for method in _get_handler_methods(DocLibrary()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            assert_equals(handler.doc, method.expected_doc)
            assert_equals(handler.shortdoc, method.expected_shortdoc)

    def test_arguments(self):
        # ArgInfoLibrary methods encode their expected argspec tuple, as a
        # Python literal, in __doc__.
        for method in _get_handler_methods(ArgInfoLibrary()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            args = handler.arguments
            argspec = (args.positional, args.defaults, args.varargs, args.kwargs)
            expected = eval(method.__doc__)
            assert_equals(argspec, expected, method.__name__)

    def test_arg_limits(self):
        # ArgumentsPython methods encode (minargs, maxargs) in __doc__.
        for method in _get_handler_methods(ArgumentsPython()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            exp_mina, exp_maxa = eval(method.__doc__)
            assert_equals(handler.arguments.minargs, exp_mina)
            assert_equals(handler.arguments.maxargs, exp_maxa)

    def test_getarginfo_getattr(self):
        handlers = TestLibrary('classes.GetattrLibrary').handlers
        assert_equals(len(handlers), 3)
        for handler in handlers:
            assert_true(handler.name in ['Foo', 'Bar', 'Zap'])
            assert_equals(handler.arguments.minargs, 0)
            # sys.maxint: this suite targets Python 2.
            assert_equals(handler.arguments.maxargs, sys.maxint)
class TestDynamicHandlerCreation(unittest.TestCase):
    """Tests DynamicHandler creation: documentation handling and argspec
    parsing/validation (mandatory/default args, varargs, kwargs rules)."""

    def test_none_doc(self):
        self._assert_doc(None, '')

    def test_empty_doc(self):
        self._assert_doc('')

    def test_non_empty_doc(self):
        self._assert_doc('This is some documentation')

    def test_non_ascii_doc(self):
        self._assert_doc(u'P\xe4iv\xe4\xe4')

    # UTF-8 byte-string docs are decoded everywhere except on IronPython.
    if not utils.IRONPYTHON:

        def test_with_utf8_doc(self):
            doc = u'P\xe4iv\xe4\xe4'
            self._assert_doc(doc.encode('UTF-8'), doc)

    def test_invalid_doc_type(self):
        self._assert_fails('Return value must be string.', doc=True)

    def test_none_argspec(self):
        self._assert_spec(None, maxargs=sys.maxint, vararg='varargs', kwarg=False)

    def test_none_argspec_when_kwargs_supported(self):
        self._assert_spec(None, maxargs=sys.maxint, vararg='varargs', kwarg='kwargs')

    def test_empty_argspec(self):
        self._assert_spec([])

    def test_mandatory_args(self):
        for argspec in [['arg'], ['arg1', 'arg2', 'arg3']]:
            self._assert_spec(argspec, len(argspec), len(argspec), argspec)

    def test_only_default_args(self):
        self._assert_spec(['defarg1=value', 'defarg2=defvalue'], 0, 2,
                          ['defarg1', 'defarg2'], ['value', 'defvalue'])

    def test_default_value_may_contain_equal_sign(self):
        self._assert_spec(['d=foo=bar'], 0, 1, ['d'], ['foo=bar'])

    def test_varargs(self):
        self._assert_spec(['*vararg'], 0, sys.maxint, vararg='vararg')

    def test_kwargs(self):
        self._assert_spec(['**kwarg'], 0, 0, kwarg='kwarg')

    def test_varargs_and_kwargs(self):
        self._assert_spec(['*vararg', '**kwarg'],
                          0, sys.maxint, vararg='vararg', kwarg='kwarg')

    def test_integration(self):
        self._assert_spec(['arg', 'default=value'], 1, 2,
                          ['arg', 'default'], ['value'])
        self._assert_spec(['arg', 'default=value', '*var'], 1, sys.maxint,
                          ['arg', 'default'], ['value'], 'var')
        self._assert_spec(['arg', 'default=value', '**kw'], 1, 2,
                          ['arg', 'default'], ['value'], None, 'kw')
        self._assert_spec(['arg', 'default=value', '*var', '**kw'], 1, sys.maxint,
                          ['arg', 'default'], ['value'], 'var', 'kw')

    def test_invalid_argspec_type(self):
        for argspec in [True, [1, 2]]:
            self._assert_fails("Return value must be list of strings.", argspec)

    def test_mandatory_arg_after_default_arg(self):
        for argspec in [['d=v', 'arg'], ['a', 'b', 'c=v', 'd']]:
            self._assert_fails('Non-default argument after default arguments.',
                               argspec)

    def test_positional_after_vararg(self):
        for argspec in [['*foo', 'arg'], ['arg', '*var', 'arg'],
                        ['a', 'b=d', '*var', 'c'], ['*var', '*vararg']]:
            self._assert_fails('Positional argument after varargs.', argspec)

    def test_kwarg_not_last(self):
        for argspec in [['**foo', 'arg'], ['arg', '**kw', 'arg'],
                        ['a', 'b=d', '**kw', 'c'], ['**kw', '*vararg'],
                        ['**kw', '**kwarg']]:
            self._assert_fails('Only last argument can be kwargs.', argspec)

    def test_missing_kwargs_support(self):
        self._assert_fails("Too few 'run_keyword' method parameters"
                           " for **kwargs support.",
                           ['**kwargs'])

    def _assert_doc(self, doc, expected=None):
        # With no explicit expectation, the doc must round-trip unchanged.
        expected = doc if expected is None else expected
        assert_equals(self._create_handler(doc=doc).doc, expected)

    def _assert_spec(self, argspec, minargs=0, maxargs=0, positional=[],
                     defaults=[], vararg=None, kwarg=None):
        # NOTE: positional/defaults use mutable defaults; they are only
        # read here, never mutated.
        # kwarg=None -> check both with and without kwargs support;
        # kwarg=False -> only without support; a string -> only with.
        if kwarg is None:
            kwargs_support_modes = [True, False]
        elif kwarg is False:
            kwargs_support_modes = [False]
            kwarg = None
        else:
            kwargs_support_modes = [True]
        for kwargs_support in kwargs_support_modes:
            arguments = self._create_handler(argspec,
                                             kwargs_support=kwargs_support
                                             ).arguments
            assert_equals(arguments.minargs, minargs)
            assert_equals(arguments.maxargs, maxargs)
            assert_equals(arguments.positional, positional)
            assert_equals(arguments.defaults, defaults)
            assert_equals(arguments.varargs, vararg)
            assert_equals(arguments.kwargs, kwarg)

    def _assert_fails(self, error, argspec=None, doc=None):
        assert_raises_with_msg(DataError, error,
                               self._create_handler, argspec, doc)

    def _create_handler(self, argspec=None, doc=None, kwargs_support=False):
        # Build a DynamicHandler around a mock library; the arity of
        # run_keyword controls whether **kwargs is considered supported.
        lib = LibraryMock('TEST CASE')
        if kwargs_support:
            lib.run_keyword = lambda name, args, kwargs: None
        else:
            lib.run_keyword = lambda name, args: None
        lib.run_keyword.__name__ = 'run_keyword'
        doc = GetKeywordDocumentation(lib)._handle_return_value(doc)
        argspec = GetKeywordArguments(lib)._handle_return_value(argspec)
        return DynamicHandler(lib, 'mock', RunKeyword(lib), doc, argspec)
if utils.JYTHON:
handlers = dict((me
|
SmartSearch/Edge-Node
|
LinkedDataManager/feed_generator/ldm_feeder.py
|
Python
|
mpl-2.0
| 8,016
| 0.013348
|
##
##SMART FP7 - Search engine for MultimediA enviRonment generated contenT
##Webpage: http://smartfp7.eu
##
## This Source Code Form is subject to the terms of the Mozilla Public
## License, v. 2.0. If a copy of the MPL was not distributed with this
## file, You can obtain one at http://mozilla.org/MPL/2.0/.
##
## The Original Code is Copyright (c) 2012-2013 Atos
## All Rights Reserved
##
## Contributor(s):
## Jose Miguel Garrido, jose.garridog at atos dot net
##
"""The third Multimedia Data Manager.
This module stores the metadata from XML files to a SQLite database.
The video generator uses this database to create the actual video clips"""
# This file must work in python >2.7 and >3.3
import sys
p_v = 2 if sys.version_info < (3,) else 3
if p_v == 2:
import urllib, urllib2
import ConfigParser as cp
else:
import urllib.request, urllib.parse, urllib.error
import configparser as cp
import json
import couchdb
import argparse
import logging
import time, datetime
def getConf(filename, section):
    """Read *section* of the INI file *filename* into a plain dict.

    Post-processing applied to two well-known keys:
      * "wait_time" is coerced to int;
      * a "couch_server" value of the literal string "None" becomes None.

    Options that cannot be read are stored as None (and reported), so
    callers still see the key.
    """
    dict1 = {}
    config = cp.ConfigParser()
    config.read(filename)
    for option in config.options(section):
        try:
            dict1[option] = config.get(section, option)
        except cp.Error:
            # Narrowed from a bare `except:`; only configparser failures
            # (interpolation errors etc.) are expected here.
            print("exception on {}!".format(option))
            dict1[option] = None
    dict1["wait_time"] = int(dict1["wait_time"])
    if dict1["couch_server"] == "None":
        dict1["couch_server"] = None
    return dict1
def createURL(conf):
    """Translate the feeder configuration into a search-service URL plus a
    query-description dict (stored later alongside each result).

    Two search types are handled: "textual" (label search, for venues or
    activities) and "geo-search" (square or circular area).
    """
    query = {"@id": conf["id"]}
    if conf["search_type"] == "textual":
        command = "txtSearch"
        if conf["search_for"] == "venue":
            target = "venues"
        else:
            target = "activities"
        # urllib.quote lives in different modules on Python 2 vs 3;
        # p_v is the module-level interpreter-major-version flag.
        if p_v == 2:
            url = '{}/{}/{}?label=%22{}%22'.format(conf["url_base"], command,
                                                   target,
                                                   urllib.quote(conf["keywords"]))
        else:
            url = '{}/{}/{}?label=%22{}%22'.format(conf["url_base"], command,
                                                   target,
                                                   urllib.parse.quote(conf["keywords"]))
        query.update({"keywords": conf["keywords"].split(),
                      "searched_item": conf["search_for"],
                      "search_type": "textual"})
    elif conf["search_type"] == "geo-search":
        command = "structuredSearch"
        query.update({"search_type": "geo-search"})
        if conf["search_for"] == "venue":
            query.update({"searched_item": "venues"})
            if conf["coord_type"] == "square":
                target = "locRec"
                query.update({"search_coords": [conf["coord1_long"], conf["coord1_lat"],
                                                conf["coord2_long"], conf["coord2_lat"]]})
            else:
                target = "locCirc"
                query.update({"search_coords": [conf["coord1_long"], conf["coord1_lat"],
                                                conf["radius"]]})
        else:
            query.update({"searched_item": "activities"})
            if conf["coord_type"] == "square":
                target = "actRec"
                query.update({"search_coords": [conf["coord1_long"], conf["coord1_lat"],
                                                conf["coord2_long"], conf["coord2_lat"]]})
            else:
                target = "actCirc"
                query.update({"search_coords": [conf["coord1_long"], conf["coord1_lat"],
                                                conf["radius"]]})
        # Circular targets take a centre + radius; square ones two corners.
        if target in ("actCirc", "locCirc"):
            url = '{}/{}/{}?lat1={}&long1={}&radius={}'.format(conf["url_base"],
                                                               command,
                                                               target,
                                                               conf["coord1_lat"],
                                                               conf["coord1_long"],
                                                               conf["radius"])
        else:
            url = '{}/{}/{}?lat1={}&long1={}&lat2={}&long2={}'.format(conf["url_base"],
                                                                      command, target,
                                                                      conf["coord1_lat"],
                                                                      conf["coord1_long"],
                                                                      conf["coord2_lat"],
                                                                      conf["coord2_long"])
    # NOTE(review): if search_type is neither "textual" nor "geo-search",
    # `url` is unbound and the debug call below raises NameError.
    logging.debug(url)
    logging.debug(query)
    return url, query
def formatItem(key, doc, time_query, query_info, num):
    """Build a CouchDB document for one search result.

    *num* is the result's position within the batch; it is folded into the
    millisecond part of the timestamp so that documents created in the same
    second still get distinct, ordered ids.
    """
    ldm_result = dict(query_info)
    ldm_result["key"] = key
    if query_info["search_type"] == "textual":
        ldm_result["location"] = doc["location"]
    else:
        locations = doc["location"]
        ldm_result["location"] = [entry["location"] for entry in locations]
        ldm_result["location_long"] = [entry["long"] for entry in locations]
        ldm_result["location_lat"] = [entry["lat"] for entry in locations]
    if "isPrimaryTopicOf" in doc:
        ldm_result["is_primary_topic_of"] = doc["isPrimaryTopicOf"]
    # Optional fields copied through verbatim when present.
    for field in ("txt", "label", "date", "name", "attendance"):
        if field in doc:
            ldm_result[field] = doc[field]
    data = {"time": time_query, "ldm_result": ldm_result}
    timestamp = time.time() + (num / 1000.0)
    time_txt = datetime.datetime.utcfromtimestamp(timestamp).isoformat() + "Z"
    item = {"_id": time_txt, "data": data, "timestamp": str(int(timestamp * 1000))}
    # Warn about result fields we did not expect (and therefore dropped).
    known = {"location", "isPrimaryTopicOf", "txt", "label", "date", "name",
             "attendance"}
    remainder = set(doc.keys()) - known
    if remainder:
        logging.warning("WARNING")
        logging.warning(remainder)
    logging.debug(item)
    return item
def storeItem(db, item):
    """Persist *item* into the CouchDB database handle *db*."""
    db.save(item)
if __name__ == '__main__':
    # Initialization: verbose logging plus CLI options for the config file
    # and the config section to use.
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s-> %(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--conf_file", type=str,
                        help="configuration file path")
    parser.add_argument("-s", "--section", type=str,
                        help="section of the configuration to apply")
    args = parser.parse_args()
    conf_file = args.conf_file if args.conf_file else "ldm_feeder_conf.ini"
    # NOTE(review): the fallback condition tests args.conf_file instead of
    # args.section, so `-s` alone is ignored -- looks like a bug; confirm.
    section = args.section if args.conf_file else "default"
    while True:  # poll loop; wait_time == 0 means run once and exit
        conf = getConf(conf_file, section)
        couch = couchdb.Server(conf["couch_server"]) if conf["couch_server"] else couchdb.Server()
        db = couch[conf["couch_database"]]
        # The program itself: query the search service, then store every
        # returned location/activity as a CouchDB document.
        url, query_info = createURL(conf)
        if p_v == 2:
            response = urllib2.urlopen(url).read()
        else:
            response = urllib.request.urlopen(url).read()
        response = response.decode("utf-8")
        response = json.loads(response)
        if "locations" in response["data"]:
            items = "locations"
        elif "activities" in response["data"]:
            items = "activities"
        # NOTE(review): if neither key is present, `items` is unbound and
        # the loop below raises NameError.
        for num, i in enumerate(response["data"][items]):
            responseItem = formatItem(i, response["data"][items][i],
                                      response["data"]["time"], query_info, num)
            storeItem(db, responseItem)
        if conf["wait_time"] == 0:
            break
        else:
            time.sleep(conf["wait_time"])
|
dajohnso/cfme_tests
|
cfme/tests/infrastructure/test_esx_direct_host.py
|
Python
|
gpl-2.0
| 3,559
| 0.001686
|
# -*- coding: utf-8 -*-
""" Tests of managing ESX hypervisors directly. If another direct ones will be supported, it should
not be difficult to extend the parametrizer.
"""
import pytest
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.common.provider import DefaultEndpoint
from utils import testgen
from utils.net import resolve_hostname
from utils.version import Version
def pytest_generate_tests(metafunc):
    """Parametrize tests with a VMwareProvider built from each vCenter
    provider's first configured host, so that host is managed directly."""
    argnames, argvalues, idlist = testgen.providers_by_class(metafunc, [VMwareProvider])
    argnames = argnames + ["_host_provider"]
    new_idlist = []
    new_argvalues = []
    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        # TODO
        # All this should be replaced with a proper ProviderFilter passed
        # to testgen.providers()
        if args['provider'].type != "virtualcenter":
            continue
        hosts = args['provider'].data.get("hosts", [])
        if not hosts:
            continue
        version = args['provider'].data.get("version")
        if version is None:
            # No version, no test
            continue
        if Version(version) < "5.0":
            # Ignore lesser than 5
            continue
        host = hosts[0]
        ip_address = resolve_hostname(host["name"])
        endpoint = DefaultEndpoint(credentials=host["credentials"],
                                   hostname=host["name"])
        # Mock provider data: reuse the provider's data but point everything
        # (name, hostname, ip, host list, discovery range) at the one host.
        provider_data = {}
        provider_data.update(args['provider'].data)
        provider_data["name"] = host["name"]
        provider_data["hostname"] = host["name"]
        provider_data["ipaddress"] = ip_address
        provider_data.pop("host_provisioning", None)
        provider_data["hosts"] = [host]
        provider_data["discovery_range"] = {}
        provider_data["discovery_range"]["start"] = ip_address
        provider_data["discovery_range"]["end"] = ip_address
        host_provider = VMwareProvider(
            name=host["name"],
            ip_address=ip_address,
            endpoints=endpoint,
            provider_data=provider_data)
        argvalues[i].append(host_provider)
        idlist[i] = "{}/{}".format(args['provider'].key, host["name"])
        new_idlist.append(idlist[i])
        new_argvalues.append(argvalues[i])
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.yield_fixture(scope="module")
def host_provider(_host_provider, provider):
    """Yield the direct-host provider, making room for it first.

    Setup removes the original vCenter provider (hosts first, then the
    provider itself); teardown removes the host provider the same way.
    """
    def _purge(prov):
        # Hosts must go before the provider that owns them.
        for managed_host in prov.hosts:
            if managed_host.exists:
                managed_host.delete(cancel=False)
        prov.delete(cancel=False)
        prov.wait_for_delete()

    if provider.exists:
        # Get rid of the original provider, it would make a mess.
        _purge(provider)
    yield _host_provider
    # Teardown: clean up the host-based provider we used for the test.
    _purge(_host_provider)
@pytest.mark.tier(2)
def test_validate(host_provider):
    """Check that CFME can manage just the ESX hosts of a VMware setup.

    Prerequisities:
        * A CFME and a VMware provider (not set up in the CFME yet).

    Steps:
        * Add a provider pointing directly at one ESX host (IP + host
          credentials), refresh it, and validate the refresh succeeded.
    """
    host_provider.create()
    host_provider.refresh_provider_relationships()
    host_provider.validate()
|
google-research/l2p
|
augment/color_util.py
|
Python
|
apache-2.0
| 17,401
| 0.006724
|
# coding=utf-8
# Copyright 2020 The Learning-to-Prompt Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific Learning-to-Prompt governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation from SimCLR."""
import functools
from absl import flags
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
  """Apply `func` to `x` with probability `p`; otherwise return `x` as-is."""
  coin = tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32)
  should_apply = tf.less(coin, tf.cast(p, tf.float32))
  return tf.cond(should_apply, lambda: func(x), lambda: x)
def random_brightness(image, max_delta, impl='simclrv2'):
  """A multiplicative vs additive change of brightness.

  Args:
    image: The input image tensor.
    max_delta: Maximum brightness change (scale factor range for 'simclrv2',
      additive delta for 'simclrv1').
    impl: 'simclrv1' or 'simclrv2' brightness variant.

  Returns:
    The brightness-jittered image tensor.

  Raises:
    ValueError: If `impl` is not one of the two supported variants.

  NOTE: this block was reconstructed from a corrupted source (the simclrv1
  branch and the `else:` were split mid-token).
  """
  if impl == 'simclrv2':
    # Multiplicative: scale by a factor drawn from [max(1-d, 0), 1+d].
    factor = tf.random_uniform([], tf.maximum(1.0 - max_delta, 0),
                               1.0 + max_delta)
    image = image * factor
  elif impl == 'simclrv1':
    # Additive: standard TF brightness jitter.
    image = tf.image.random_brightness(image, max_delta=max_delta)
  else:
    raise ValueError('Unknown impl {} for random brightness.'.format(impl))
  return image
def to_grayscale(image, keep_channels=True):
  """Convert an RGB image to grayscale, optionally keeping three channels."""
  gray = tf.image.rgb_to_grayscale(image)
  if not keep_channels:
    return gray
  # Replicate the single luminance channel so the output stays HxWx3.
  return tf.tile(gray, [1, 1, 3])
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
  """Distorts the color of the image.

  Args:
    image: The input image tensor.
    strength: the floating number for the strength of the color augmentation.
    random_order: A bool, specifying whether to randomize the jittering order.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    The distorted image tensor.
  """
  # SimCLR scales brightness/contrast/saturation by 0.8 and hue by 0.2.
  max_brightness = 0.8 * strength
  max_contrast = 0.8 * strength
  max_saturation = 0.8 * strength
  max_hue = 0.2 * strength
  jitter = color_jitter_rand if random_order else color_jitter_nonrand
  return jitter(
      image, max_brightness, max_contrast, max_saturation, max_hue, impl=impl)
def color_jitter_nonrand(image,
                         brightness=0,
                         contrast=0,
                         saturation=0,
                         hue=0,
                         impl='simclrv2'):
  """Distorts the color of the image (jittering order is fixed).

  Args:
    image: The input image tensor.
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x, brightness, contrast, saturation, hue):
      """Apply the i-th transformation."""
      # Fixed order: 0=brightness, 1=contrast, 2=saturation, 3=hue.
      # A transform with strength 0 is a no-op (x is returned unchanged).
      if brightness != 0 and i == 0:
        x = random_brightness(x, max_delta=brightness, impl=impl)
      elif contrast != 0 and i == 1:
        x = tf.image.random_contrast(x, lower=1 - contrast, upper=1 + contrast)
      elif saturation != 0 and i == 2:
        x = tf.image.random_saturation(
            x, lower=1 - saturation, upper=1 + saturation)
      elif hue != 0:
        x = tf.image.random_hue(x, max_delta=hue)
      return x

    for i in range(4):
      image = apply_transform(i, image, brightness, contrast, saturation, hue)
      # Clip after every transform to keep values in a valid [0, 1] range.
      image = tf.clip_by_value(image, 0., 1.)
    return image
def color_jitter_rand(image,
                      brightness=0,
                      contrast=0,
                      saturation=0,
                      hue=0,
                      impl='simclrv2'):
  """Distorts the color of the image (jittering order is random).

  Args:
    image: The input image tensor.
    brightness: A float, specifying the brightness for color jitter.
    contrast: A float, specifying the contrast for color jitter.
    saturation: A float, specifying the saturation for color jitter.
    hue: A float, specifying the hue for color jitter.
    impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
      version of random brightness.

  Returns:
    The distorted image tensor.
  """
  with tf.name_scope('distort_color'):
    def apply_transform(i, x):
      """Apply the i-th transformation."""
      # `i` is a tensor here (an element of a shuffled permutation), so the
      # dispatch must happen in-graph via nested tf.cond rather than Python
      # if/elif. A transform with strength 0 is a no-op.
      def brightness_foo():
        if brightness == 0:
          return x
        else:
          return random_brightness(x, max_delta=brightness, impl=impl)

      def contrast_foo():
        if contrast == 0:
          return x
        else:
          return tf.image.random_contrast(
              x, lower=1 - contrast, upper=1 + contrast)

      def saturation_foo():
        if saturation == 0:
          return x
        else:
          return tf.image.random_saturation(
              x, lower=1 - saturation, upper=1 + saturation)

      def hue_foo():
        if hue == 0:
          return x
        else:
          return tf.image.random_hue(x, max_delta=hue)

      # Binary-tree dispatch: 0=brightness, 1=contrast, 2=saturation, 3=hue.
      x = tf.cond(
          tf.less(i, 2),
          lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
          lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
      return x

    # Apply all four transforms in a random order, clipping after each one.
    perm = tf.random_shuffle(tf.range(4))
    for i in range(4):
      image = apply_transform(perm[i], image)
      image = tf.clip_by_value(image, 0., 1.)
    return image
def _compute_crop_shape(image_height, image_width, aspect_ratio,
                        crop_proportion):
  """Compute aspect ratio-preserving shape for central crop.

  The resulting shape retains `crop_proportion` along one side and a proportion
  less than or equal to `crop_proportion` along the other side.

  Args:
    image_height: Height of image to be cropped.
    image_width: Width of image to be cropped.
    aspect_ratio: Desired aspect ratio (width / height) of output.
    crop_proportion: Proportion of image to retain along the less-cropped side.

  Returns:
    crop_height: Height of image after cropping.
    crop_width: Width of image after cropping.
  """
  image_width_float = tf.cast(image_width, tf.float32)
  image_height_float = tf.cast(image_height, tf.float32)

  def _requested_aspect_ratio_wider_than_image():
    # Width keeps the full crop_proportion; height is shrunk to match ratio.
    crop_height = tf.cast(
        tf.rint(crop_proportion / aspect_ratio * image_width_float), tf.int32)
    crop_width = tf.cast(tf.rint(crop_proportion * image_width_float), tf.int32)
    return crop_height, crop_width

  def _image_wider_than_requested_aspect_ratio():
    # Height keeps the full crop_proportion; width is shrunk to match ratio.
    crop_height = tf.cast(
        tf.rint(crop_proportion * image_height_float), tf.int32)
    crop_width = tf.cast(
        tf.rint(crop_proportion * aspect_ratio * image_height_float), tf.int32)
    return crop_height, crop_width

  # Choose the branch in-graph based on the actual image aspect ratio.
  return tf.cond(aspect_ratio > image_width_float / image_height_float,
                 _requested_aspect_ratio_wider_than_image,
                 _image_wider_than_requested_aspect_ratio)
def center_crop(image, height, width, crop_proportion):
"""Crops to center of image and rescales to desired size.
Args:
image: Image Tensor to crop.
height: Height of image to be cropped.
width: Width of image to be cropped.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
A `height` x `width` x channels Tensor holding a central crop of `image`.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
crop_height, crop_width = _compute_crop_shape(image_height, image_width,
|
juntatalor/qexx
|
cart/urls.py
|
Python
|
mit
| 768
| 0.004049
|
from django.conf.urls import patterns, url

from cart import views

# Cart URL routes. `patterns('', ...)` is the pre-Django-1.8 style used here.
# NOTE: the import line was reconstructed from a corrupted source.
urlpatterns = patterns(
    '',
    url(r'^$', views.view_cart, name='view'),
    url(r'^add/$', views.add_to_cart, name='add'),
    url(r'^remove/$', views.remove_from_cart, name='remove'),
    url(r'^update/$', views.update_cart, name='update'),
    url(r'^checkout/$', views.checkout, name='checkout'),
    url(r'^update_checkout/$', views.update_checkout, name='update_checkout'),
    # For ajax refresh of the cart summary widget
    url(r'^summary/$', views.get_cart_summary, name='get_cart_summary'),
)
|
kk6/tssm
|
app.py
|
Python
|
mit
| 5,552
| 0.001441
|
# -*- coding: utf-8 -*-
import json
import os
import urllib.parse
from functools import wraps
import bottle
from bottle import (
route,
run,
jinja2_template as template,
redirect,
request,
response,
static_file,
BaseTemplate,
)
import tweepy
# Project directories: STATIC_DIR holds the assets served by send_static().
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, 'static')
# Register a Jinja2 template filter that URL-encodes a search query into a
# "q=..." query string for building search links in templates.
BaseTemplate.settings.update(
    {
        'filters': {
            'encode_query': lambda query: urllib.parse.urlencode({'q': query})
        }
    }
)
#######################################################################################################################
#
# Middleware
#
#######################################################################################################################
class TwitterManager(object):
    """Holds tweepy OAuth state and, once authenticated, the API client.

    Lifecycle: get_authorization_url() -> user authorizes on Twitter ->
    authenticate(verifier) stores the access tokens and builds `self.api`.
    """

    def __init__(self, consumer_key, consumer_secret, access_token=None,
                 access_token_secret=None, callback_url=None):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        self.callback_url = callback_url
        # Set by get_authorization_url(); consumed by get_access_token().
        self.request_token = None
        # Set by set_api(); None means "not authenticated yet".
        self.api = None

    def get_authorization_url(self):
        """Start the OAuth flow; return the URL to redirect the user to.

        Side effect: remembers the request token for the later
        get_access_token() call.  Raises TweepError on failure.
        """
        auth = tweepy.OAuthHandler(self.consumer_key,
                                   self.consumer_secret,
                                   self.callback_url)
        try:
            redirect_url = auth.get_authorization_url()
        except tweepy.TweepError:
            raise tweepy.TweepError('Error! Failed to get request token')
        self.request_token = auth.request_token
        return redirect_url

    def get_access_token(self, verifier):
        """Exchange the stored request token + verifier for access tokens.

        Returns an (access_token, access_token_secret) tuple.  Raises
        TweepError if get_authorization_url() was not called first or the
        exchange fails.
        """
        auth = tweepy.OAuthHandler(self.consumer_key,
                                   self.consumer_secret)
        if self.request_token is None:
            raise tweepy.TweepError("Request token not set yet.")
        auth.request_token = self.request_token
        try:
            auth.get_access_token(verifier)
        except tweepy.TweepError:
            raise tweepy.TweepError('Error! Failed to get access token')
        return (
            auth.access_token,
            auth.access_token_secret,
        )

    def set_access_token(self, key, secret):
        # Store the tokens; set_api() must be called to build the client.
        self.access_token = key
        self.access_token_secret = secret

    def get_oauth_api(self, access_token, access_token_secret):
        """Build a tweepy API client from the given access tokens."""
        auth = tweepy.OAuthHandler(self.consumer_key,
                                   self.consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        return tweepy.API(auth)

    def set_api(self):
        # Build the client from the tokens stored on this manager.
        self.api = self.get_oauth_api(self.access_token, self.access_token_secret)

    def authenticate(self, verifier):
        """Complete the OAuth flow: fetch, store tokens and build the API."""
        token = self.get_access_token(verifier)
        self.set_access_token(*token)
        self.set_api()
class TwitterMiddleware(object):
    """WSGI middleware exposing a shared TwitterManager as environ['twitter'].

    NOTE: the manager assignment line was reconstructed from a corrupted
    source (it was split mid-statement).
    """

    def __init__(self, app, tweepy_config):
        self.app = app
        self.tweepy_settings = tweepy_config
        # One manager instance shared by every request this app handles.
        self.tweepy_manager = TwitterManager(**self.tweepy_settings)

    def __call__(self, environ, start_response):
        # Make the manager available to every route handler via the environ.
        environ['twitter'] = self.tweepy_manager
        return self.app(environ, start_response)
#######################################################################################################################
#
# Decorators
#
########################
|
###############################################################################################
def login_required(f):
    """Decorator: redirect to '/' unless the Twitter API client is set up."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        manager = request.environ.get('twitter')
        if manager.api is None:
            # Not authenticated yet: send the user back to start OAuth.
            return redirect('/')
        return f(*args, **kwargs)
    return wrapper
#######################################################################################################################
#
# Controllers
#
#######################################################################################################################
@route('/static/<filename:path>')
def send_static(filename):
    """Serve static assets from STATIC_DIR."""
    return static_file(filename, root=STATIC_DIR)
@route('/')
def index():
    """Render the landing page."""
    return template('index')
@route('/oauth')
def oauth():
    """Start the Twitter OAuth flow: redirect to the authorization URL."""
    twitter = request.environ.get('twitter')
    redirect_url = twitter.get_authorization_url()
    return redirect(redirect_url)
@route('/verify')
def verify():
    """OAuth callback: exchange the verifier for access tokens, then go home."""
    twitter = request.environ.get('twitter')
    verifier = request.params.get('oauth_verifier')
    twitter.authenticate(verifier)
    # NOTE(review): relative target 'home' — presumably resolves to /home;
    # confirm bottle's redirect behavior for relative paths.
    return redirect('home')
@route('/home')
@login_required
def home():
    """Render the home page for the authenticated Twitter user."""
    twitter = request.environ.get('twitter')
    user = twitter.api.me()
    return template('home', user=user)
@route('/api/saved_searches/list')
@login_required
def get_saved_searches():
    """Return the authenticated user's saved searches as a JSON array."""
    twitter = request.environ.get('twitter')
    payload = [
        {
            'id': saved.id,
            'name': saved.name,
            'query': saved.query,
            'timestamp': saved.created_at.strftime('%Y-%m-%d %H:%M:%S'),
        }
        for saved in twitter.api.saved_searches()
    ]
    response.headers['Content-Type'] = 'application/json'
    return json.dumps(payload)
if __name__ == "__main__":
    # Twitter app credentials come from the environment; the callback URL
    # must match the one registered with the Twitter application.
    twitter_config = {
        'consumer_key': os.environ['TSSM_CONSUMER_KEY'],
        'consumer_secret': os.environ['TSSM_CONSUMER_SECRET'],
        'callback_url': 'http://127.0.0.1:8000/verify',
    }
    app = TwitterMiddleware(bottle.app(), twitter_config)
    # Development server settings (auto-reload, debug tracebacks).
    run(app=app, host="localhost", port=8000, debug=True, reloader=True)
|
be-cloud-be/horizon-addons
|
server-tools/date_range/wizard/date_range_generator.py
|
Python
|
agpl-3.0
| 2,576
| 0
|
# -*- coding: utf-8 -*-
# © 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models
from dateutil.rrule import (rrule,
YEARLY,
MONTHLY,
WEEKLY,
DAILY)
from dateutil.relativedelta import relativedelta
class DateRangeGenerator(models.TransientModel):
    """Wizard generating a series of contiguous date.range records."""
    _name = 'date.range.generator'

    @api.model
    def _default_company(self):
        return self.env['res.company']._company_default_get('date.range')

    name_prefix = fields.Char('Range name prefix', required=True)
    # Fixed typo: the original passed `strint=` instead of `string=`, so the
    # field label was silently ignored.
    date_start = fields.Date(string='Start date', required=True)
    type_id = fields.Many2one(
        comodel_name='date.range.type', string='Type', required=True,
        ondelete='cascade')
    company_id = fields.Many2one(
        comodel_name='res.company', string='Company',
        default=_default_company)
    unit_of_time = fields.Selection([
        (YEARLY, 'years'),
        (MONTHLY, 'months'),
        (WEEKLY, 'weeks'),
        (DAILY, 'days')], required=True)
    duration_count = fields.Integer('Duration', required=True)
    count = fields.Integer(
        string="Number of ranges to generate", required=True)

    @api.multi
    def _compute_date_ranges(self):
        """Return a list of value dicts, one per date.range to create."""
        self.ensure_one()
        # Generate count+1 boundary dates; each range spans two consecutive
        # boundaries.
        vals = rrule(freq=self.unit_of_time, interval=self.duration_count,
                     dtstart=fields.Date.from_string(self.date_start),
                     count=self.count + 1)
        vals = list(vals)
        date_ranges = []
        for idx, dt_start in enumerate(vals[:-1]):
            date_start = fields.Date.to_string(dt_start.date())
            # always remove 1 day for the date_end since range limits are
            # inclusive
            dt_end = vals[idx + 1].date() - relativedelta(days=1)
            date_end = fields.Date.to_string(dt_end)
            date_ranges.append({
                'name': '%s-%d' % (self.name_prefix, idx + 1),
                'date_start': date_start,
                'date_end': date_end,
                'type_id': self.type_id.id,
                'company_id': self.company_id.id})
        return date_ranges

    @api.multi
    def action_apply(self):
        """Create the ranges and reopen the date.range list view."""
        date_ranges = self._compute_date_ranges()
        if date_ranges:
            for dr in date_ranges:
                self.env['date.range'].create(dr)
        return self.env['ir.actions.act_window'].for_xml_id(
            module='date_range', xml_id='date_range_action')
|
DTOcean/dtocean-core
|
dtocean_core/interfaces/plots_installation.py
|
Python
|
gpl-3.0
| 18,785
| 0.000799
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Adam Collin, Mathew Topper
# Copyright (C) 2017-2018 Mathew Topper
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Created on Wed Apr 06 15:59:04 2016
.. moduleauthor:: Adam Collin <adam.collin@ieee.org>
.. moduleauthor:: Mathew Topper <mathew.topper@dataonlygreater.com>
"""
from datetime import timedelta
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from matplotlib.dates import (MONTHLY,
DateFormatter,
RRuleLocator,
date2num,
rrulewrapper)
from textwrap import wrap
from . import PlotInterface
class InstallationGanttChartPlot(PlotInterface):
    """Plot interface producing a Gantt chart of the installation phases.

    NOTE: the electrical-times entry of `declare_id_map` was reconstructed
    from a corrupted source (it was split mid-string).
    """

    @classmethod
    def get_name(cls):
        '''A class method for the common name of the interface.

        Returns:
          str: A unique string
        '''
        return "Installation Gantt Chart"

    @classmethod
    def declare_inputs(cls):
        '''A class method to declare all the variables required as inputs by
        this interface.

        Returns:
          list: List of inputs identifiers

        Example:
          The returned value can be None or a list of identifier strings which
          appear in the data descriptions. For example::

              inputs = ["My:first:variable",
                        "My:second:variable",
                       ]
        '''
        input_list = [
            "project.install_support_structure_dates",
            "project.install_devices_dates",
            "project.install_dynamic_cable_dates",
            "project.install_export_cable_dates",
            "project.install_array_cable_dates",
            "project.install_surface_piercing_substation_dates",
            "project.install_subsea_collection_point_dates",
            "project.install_cable_protection_dates",
            "project.install_driven_piles_dates",
            "project.install_direct_embedment_dates",
            "project.install_gravity_based_dates",
            "project.install_pile_anchor_dates",
            "project.install_drag_embedment_dates",
            "project.install_suction_embedment_dates",
            "project.device_phase_installation_times",
            "project.electrical_phase_installation_times",
            "project.mooring_phase_installation_times",
            "project.installation_plan"]
        return input_list

    @classmethod
    def declare_optional(cls):
        # Every input is optional: the chart draws whatever phases have data.
        option_list = [
            "project.install_support_structure_dates",
            "project.install_devices_dates",
            "project.install_dynamic_cable_dates",
            "project.install_export_cable_dates",
            "project.install_array_cable_dates",
            "project.install_surface_piercing_substation_dates",
            "project.install_subsea_collection_point_dates",
            "project.install_cable_protection_dates",
            "project.install_driven_piles_dates",
            "project.install_direct_embedment_dates",
            "project.install_gravity_based_dates",
            "project.install_pile_anchor_dates",
            "project.install_drag_embedment_dates",
            "project.install_suction_embedment_dates",
            "project.device_phase_installation_times",
            "project.electrical_phase_installation_times",
            "project.mooring_phase_installation_times",
            "project.installation_plan"]
        return option_list

    @classmethod
    def declare_id_map(self):
        '''Declare the mapping for variable identifiers in the data description
        to local names for use in the interface. This helps isolate changes in
        the data description or interface from effecting the other.

        Returns:
          dict: Mapping of local to data description variable identifiers

        Example:
          The returned value must be a dictionary containing all the inputs and
          outputs from the data description and a local alias string. For
          example::

              id_map = {"var1": "My:first:variable",
                        "var2": "My:second:variable",
                        "var3": "My:third:variable"
                       }
        '''
        id_map = {"install_support_structure_dates":
                      "project.install_support_structure_dates",
                  "install_devices_dates":
                      "project.install_devices_dates",
                  "install_dynamic_cable_dates":
                      "project.install_dynamic_cable_dates",
                  "install_export_cable_dates":
                      "project.install_export_cable_dates",
                  "install_array_cable_dates":
                      "project.install_array_cable_dates",
                  "install_surface_piercing_substation_dates":
                      "project.install_surface_piercing_substation_dates",
                  "install_subsea_collection_point_dates":
                      "project.install_subsea_collection_point_dates",
                  "install_cable_protection_dates":
                      "project.install_cable_protection_dates",
                  "install_driven_piles_dates":
                      "project.install_driven_piles_dates",
                  "install_direct_embedment_dates":
                      "project.install_direct_embedment_dates",
                  "install_gravity_based_dates":
                      "project.install_gravity_based_dates",
                  "install_pile_anchor_dates":
                      "project.install_pile_anchor_dates",
                  "install_drag_embedment_dates":
                      "project.install_drag_embedment_dates",
                  "install_suction_embedment_dates":
                      "project.install_suction_embedment_dates",
                  "install_device_times":
                      "project.device_phase_installation_times",
                  "install_electrical_times":
                      "project.electrical_phase_installation_times",
                  "install_mooring_times":
                      "project.mooring_phase_installation_times",
                  "plan": "project.installation_plan"
                  }
        return id_map

    def connect(self):
        # Build the matplotlib figure and store its handle for the framework.
        self.fig_handle = installation_gantt_chart(
            self.data.plan,
            self.data.install_support_structure_dates,
            self.data.install_devices_dates,
            self.data.install_dynamic_cable_dates,
            self.data.install_export_cable_dates,
            self.data.install_array_cable_dates,
            self.data.install_surface_piercing_substation_dates,
            self.data.install_subsea_collection_point_dates,
            self.data.install_cable_protection_dates,
            self.data.install_driven_piles_dates,
            self.data.install_direct_embedment_dates,
            self.data.install_gravity_based_dates,
            self.data.install_pile_anchor_dates,
            self.data.install_drag_embedment_dates,
            self.data.install_suction_embedment_dates,
            self.data.install_device_times,
            self.data.install_electrical_times,
            self.data.install_mooring_times)
        return
def installation_
|
TheRealVestige/VestigeX-Server
|
Data/scripts/player/objects/objectclick2.py
|
Python
|
gpl-3.0
| 196
| 0.02551
|
from server.util import ScriptManager

# Second-click handlers for bankable objects; both open the bank interface.
# NOTE: the import line was reconstructed from a corrupted source.


def objectClick2_2213(player, obId, obX, obY):
    """Open the bank for object id 2213 (second click option)."""
    player.getPA().openUpBank()


def objectClick2_11758(player, obId, obX, obY):
    """Open the bank for object id 11758 (second click option)."""
    player.getPA().openUpBank()
|
opentrials/opentrials-airflow
|
dags/data_contributions.py
|
Python
|
mpl-2.0
| 718
| 0
|
import datetime

from airflow.models import DAG
from airflow.operators.latest_only_operator import LatestOnlyOperator

import utils.helpers as helpers

# Default task arguments shared by every task in this DAG.
# NOTE: this dict was reconstructed from a corrupted source (split lines).
args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime.datetime(2017, 3, 1),
    'retries': 1,
    'retry_delay': datetime.timedelta(minutes=10),
}

dag = DAG(
    dag_id='data_contributions',
    default_args=args,
    max_active_runs=1,
    schedule_interval='@daily'
)

# Skip backfill runs: only the latest scheduled run does real work.
latest_only_task = LatestOnlyOperator(
    task_id='latest_only',
    dag=dag,
)

data_contributions_processor_task = helpers.create_processor_task(
    name='data_contributions',
    dag=dag
)
data_contributions_processor_task.set_upstream(latest_only_task)
|
LubyRuffy/spiderfoot
|
modules/sfp_template.py
|
Python
|
gpl-2.0
| 2,631
| 0.00152
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_XXX
# Purpose: Description of the plug-in.
#
# Author: Name and e-mail address
#
# Created: Date
# Copyright: (c) Name
# Licence: GPL
# -------------------------------------------------------------------------------
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_XXX(SpiderFootPlugin):
    """Name:Description"""

    # Default options
    # NOTE(review): class-level mutable dicts are shared between instances;
    # setup() resets self.results but mutates the shared `opts` — this
    # mirrors the SpiderFoot plugin template, so it is kept as-is.
    opts = {}

    # Option descriptions
    optdescs = {
        # For each option in opts you should have a key/value pair here
        # describing it. It will end up in the UI to explain the option
        # to the end-user.
    }

    # Be sure to completely clear any class variables in setup()
    # or you run the risk of data persisting between scan runs.

    # Target
    results = dict()

    def setup(self, sfc, userOpts=dict()):
        self.sf = sfc
        self.results = dict()

        # Clear / reset any other class member variables here
        # or you risk them persisting between threads.

        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    # * = be notified about all events.
    def watchedEvents(self):
        return ["*"]

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return None

    # Handle events sent to this module
    def handleEvent(self, event):
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        # If you are processing TARGET_WEB_CONTENT from sfp_spider, this is how you
        # would get the source of that raw data (e.g. a URL.)
        eventSource = event.sourceEvent.data

        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)

        # DO SOMETHING HERE

        # Notify other modules of what you've found
        evt = SpiderFootEvent("EVENT_CODE_HERE", "data here", self.__name__, event.sourceEvent)
        self.notifyListeners(evt)

        return None

    # If you intend for this module to act on its own (e.g. not solely rely
    # on events from other modules, then you need to have a start() method
    # and within that method call self.checkForStop() to see if you've been
    # politely asked by the controller to stop your activities (user abort.)

# End of sfp_XXX class
|
overfly83/bjrobot
|
src/BJRobot/keywords/logging.py
|
Python
|
mit
| 1,462
| 0.004788
|
import os
import sys
from robot.api import logger
from keywordgroup import KeywordGroup
from robot.libraries.BuiltIn import BuiltIn
try:
from robot.libraries.BuiltIn import RobotNotRunningError
except ImportError:
RobotNotRunningError = AttributeError
class Logging(KeywordGroup):
    """Private logging helpers bridging Robot Framework's logger.

    NOTE: two statements of this class were reconstructed from a corrupted
    source (`self._html(message)` and `return items` were split).
    """

    # Private

    def _debug(self, message):
        logger.debug(message)

    def _get_log_dir(self):
        """Return the directory of the current Robot log, or the CWD when
        not running under Robot."""
        try:
            variables = BuiltIn().get_variables()
            logfile = variables['${LOG FILE}']
            if logfile != 'NONE':
                return os.path.dirname(logfile)
            return variables['${OUTPUTDIR}']
        except RobotNotRunningError:
            # Library used outside a Robot run.
            return os.getcwd()

    def _html(self, message):
        # html=True, also_console=False
        logger.info(message, True, False)

    def _info(self, message):
        logger.info(message)

    def _log(self, message, level='INFO'):
        """Dispatch `message` to the handler matching `level`
        (case-insensitive). Unknown levels are silently ignored."""
        level = level.upper()
        if level == 'INFO':
            self._info(message)
        elif level == 'DEBUG':
            self._debug(message)
        elif level == 'WARN':
            self._warn(message)
        elif level == 'HTML':
            self._html(message)

    def _log_list(self, items, what='item'):
        """Log a numbered listing of `items` and return them unchanged."""
        msg = ['Altogether %d %s%s.' % (len(items), what, ['s', ''][len(items) == 1])]
        for index, item in enumerate(items):
            msg.append('%d: %s' % (index + 1, item))
        self._info('\n'.join(msg))
        return items

    def _warn(self, message):
        logger.warn(message)
|
Kozea/WeasyPrint
|
weasyprint/layout/replaced.py
|
Python
|
bsd-3-clause
| 11,239
| 0
|
"""Layout for images and other replaced elements.
See http://dev.w3.org/csswg/css-images-3/#sizing
"""
from .min_max import handle_min_max_height, handle_min_max_width
from .percent import percentage
def default_image_sizing(intrinsic_width, intrinsic_height, intrinsic_ratio,
                         specified_width, specified_height,
                         default_width, default_height):
    """Default sizing algorithm for the concrete object size.

    Return a ``(concrete_width, concrete_height)`` tuple.

    See http://dev.w3.org/csswg/css-images-3/#default-sizing

    """
    # Treat 'auto' exactly like "not specified".
    width = None if specified_width == 'auto' else specified_width
    height = None if specified_height == 'auto' else specified_height
    if width is not None and height is not None:
        # Both dimensions given: use them as-is.
        return width, height
    if width is not None:
        # Derive the height from the ratio, the intrinsic height, or default.
        if intrinsic_ratio is not None:
            return width, width / intrinsic_ratio
        if intrinsic_height is not None:
            return width, intrinsic_height
        return width, default_height
    if height is not None:
        # Symmetric case: derive the width.
        if intrinsic_ratio is not None:
            return height * intrinsic_ratio, height
        if intrinsic_width is not None:
            return intrinsic_width, height
        return default_width, height
    # Nothing specified: fall back to the intrinsic dimensions, or to the
    # contain-constrained default size when there are none.
    if intrinsic_width is not None or intrinsic_height is not None:
        return default_image_sizing(
            intrinsic_width, intrinsic_height, intrinsic_ratio,
            intrinsic_width, intrinsic_height, default_width, default_height)
    return contain_constraint_image_sizing(
        default_width, default_height, intrinsic_ratio)
def contain_constraint_image_sizing(constraint_width, constraint_height,
                                    intrinsic_ratio):
    """Contain constraint sizing algorithm for the concrete object size.

    Return a ``(concrete_width, concrete_height)`` tuple.

    See http://dev.w3.org/csswg/css-images-3/#contain-constraint

    """
    # Delegates to the shared implementation with cover=False (fit inside).
    return _constraint_image_sizing(
        constraint_width, constraint_height, intrinsic_ratio, cover=False)
def cover_constraint_image_sizing(constraint_width, constraint_height,
                                  intrinsic_ratio):
    """Cover constraint sizing algorithm for the concrete object size.

    Return a ``(concrete_width, concrete_height)`` tuple.

    See http://dev.w3.org/csswg/css-images-3/#cover-constraint

    """
    # Delegates to the shared implementation with cover=True (fill the box).
    return _constraint_image_sizing(
        constraint_width, constraint_height, intrinsic_ratio, cover=True)
def _constraint_image_sizing(constraint_width, constraint_height,
intrinsic_ratio, cover):
if intrinsic_ratio is None:
return constraint_width, constraint_height
elif cover ^ (constraint_width > constraint_height * intrinsic_ratio):
return constraint_height * intrinsic_ratio, constraint_height
else:
return constraint_width, constraint_width / intrinsic_ratio
def replacedbox_layout(box):
    """Compute the drawn size and position of a replaced box's content.

    Returns ``(draw_width, draw_height, position_x, position_y)`` following
    the ``object-fit`` / ``object-position`` properties.

    NOTE: two statements of this function were reconstructed from a
    corrupted source (they were split mid-identifier).
    """
    # TODO: respect box-sizing ?
    object_fit = box.style['object_fit']
    position = box.style['object_position']
    image = box.replacement
    intrinsic_width, intrinsic_height, intrinsic_ratio = (
        image.get_intrinsic_size(
            box.style['image_resolution'], box.style['font_size']))
    if None in (intrinsic_width, intrinsic_height):
        # Missing intrinsic dimensions: derive them from the content box.
        intrinsic_width, intrinsic_height = contain_constraint_image_sizing(
            box.width, box.height, intrinsic_ratio)
    if object_fit == 'fill':
        draw_width, draw_height = box.width, box.height
    else:
        if object_fit == 'contain' or object_fit == 'scale-down':
            draw_width, draw_height = contain_constraint_image_sizing(
                box.width, box.height, intrinsic_ratio)
        elif object_fit == 'cover':
            draw_width, draw_height = cover_constraint_image_sizing(
                box.width, box.height, intrinsic_ratio)
        else:
            assert object_fit == 'none', object_fit
            draw_width, draw_height = intrinsic_width, intrinsic_height
        if object_fit == 'scale-down':
            # 'scale-down' is the smaller of 'none' and 'contain'.
            draw_width = min(draw_width, intrinsic_width)
            draw_height = min(draw_height, intrinsic_height)
    origin_x, position_x, origin_y, position_y = position[0]
    # Free space inside the content box, used to resolve percentages.
    ref_x = box.width - draw_width
    ref_y = box.height - draw_height
    position_x = percentage(position_x, ref_x)
    position_y = percentage(position_y, ref_y)
    if origin_x == 'right':
        position_x = ref_x - position_x
    if origin_y == 'bottom':
        position_y = ref_y - position_y
    position_x += box.content_box_x()
    position_y += box.content_box_y()
    return draw_width, draw_height, position_x, position_y
@handle_min_max_width
def replaced_box_width(box, containing_block):
    """Set the used width for replaced boxes.

    Follows http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-width;
    min/max-width constraints are applied by the decorator.
    """
    # Local import avoids a circular import between this module and .block.
    from .block import block_level_width
    width, height, ratio = box.replacement.get_intrinsic_size(
        box.style['image_resolution'], box.style['font_size'])
    # This algorithm simply follows the different points of the specification:
    # http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-width
    if box.height == 'auto' and box.width == 'auto':
        if width is not None:
            # Point #1
            box.width = width
        elif ratio is not None:
            if height is not None:
                # Point #2 first part
                box.width = height * ratio
            else:
                # Point #3
                block_level_width(box, containing_block)
    if box.width == 'auto':
        if ratio is not None:
            # Point #2 second part
            box.width = box.height * ratio
        elif width is not None:
            # Point #4
            box.width = width
        else:
            # Point #5
            # It's pretty useless to rely on device size to set width.
            box.width = 300
@handle_min_max_height
def replaced_box_height(box):
    """Compute and set the used height for replaced boxes.

    Follows http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-height;
    min/max-height constraints are applied by the decorator.
    """
    # http://www.w3.org/TR/CSS21/visudet.html#inline-replaced-height
    width, height, ratio = box.replacement.get_intrinsic_size(
        box.style['image_resolution'], box.style['font_size'])
    # Test 'auto' on the computed width, not the used width
    if box.height == 'auto' and box.width == 'auto':
        # NOTE(review): if `height` is None this sets box.height = None, so
        # none of the later 'auto' checks can fire — verify this is intended
        # (upstream WeasyPrint structures this function differently).
        box.height = height
    elif box.height == 'auto' and ratio:
        box.height = box.width / ratio
    # NOTE(review): the first two branches below appear unreachable after the
    # assignments above (box.height is no longer 'auto' in those cases);
    # kept verbatim pending confirmation.
    if box.height == 'auto' and box.width == 'auto' and height is not None:
        box.height = height
    elif ratio is not None and box.height == 'auto':
        box.height = box.width / ratio
    elif box.height == 'auto' and height is not None:
        box.height = height
    elif box.height == 'auto':
        # It's pretty useless to rely on device size to set width.
        box.height = 150
def inline_replaced_box_layout(box, containing_block):
    """Lay out an inline :class:`boxes.ReplacedBox` ``box``.

    'auto' margins on a replaced inline box resolve to zero; afterwards
    the used width and height are computed.
    """
    for edge in ('top', 'right', 'bottom', 'left'):
        margin_attr = f'margin_{edge}'
        if getattr(box, margin_attr) == 'auto':
            setattr(box, margin_attr, 0)
    inline_replaced_box_width_height(box, containing_block)
def inline_replaced_box_width_height(box, containing_block):
    """Compute the used width and height of a replaced box.

    When both computed dimensions are 'auto', the unconstrained sizes are
    computed first and min/max constraints are then resolved with the
    special rules for auto-sized replaced elements; otherwise the sizing
    functions apply their built-in min/max handling directly.
    """
    both_auto = (
        box.style['width'] == 'auto' and box.style['height'] == 'auto')
    if both_auto:
        replaced_box_width.without_min_max(box, containing_block)
        replaced_box_height.without_min_max(box)
        min_max_auto_replaced(box)
    else:
        replaced_box_width(box, containing_block)
        replaced_box_height(box)
def min_max_auto_replaced(box):
"""Resolve min/max constraints on replaced elements with 'auto' sizes."""
width = box.width
height = box.height
min_width = box.min_width
min_height = box.min_height
max_width = max(min_width, box.max_width)
max_height = max(min_height, box.max_height)
# (violation_width, violation_height)
violations = (
'min' if width < min_width else 'max' if width > max_width else '',
'min' if height < min_height else 'max' if
|
rmwdeveloper/webhack
|
webhack/__init__.py
|
Python
|
mit
| 38
| 0.026316
|
# Package init: only enables Python 2's absolute-import semantics.
from __future__ import absolute_import
|
Cawb07/t1-python
|
terminalone/models/vertical.py
|
Python
|
bsd-3-clause
| 596
| 0
|
# -*- coding: utf-8 -*-
"""Provides vertical object.""
|
"
from __future__ import absolute_import
from ..entity import Entity
class Vertical(Entity):
    """Vertical entity (``verticals`` collection, ``vertical`` resource)."""
    collection = 'verticals'
    resource = 'vertical'
    # Related entities that can be referenced from a vertical.
    _relations = {
        'advertiser',
    }
    # Field -> deserializer for values pulled from the API
    # (None means "keep the raw value").
    _pull = {
        'id': int,
        'name': None,
        'created_on': Entity._strpt,
        'updated_on': Entity._strpt,
        'version': int,
    }
    # Fields pushed back to the API mirror the pull mapping.
    _push = _pull
    def __init__(self, session, properties=None, **kwargs):
        super(Vertical, self).__init__(session, properties, **kwargs)
|
johan--/Geotrek
|
geotrek/tourism/management/commands/sync_rando.py
|
Python
|
bsd-2-clause
| 1,966
| 0.002543
|
from django.conf import settings
from django.utils import translation
from geotrek.tourism import models as tourism_models
from geotrek.tourism.views import TouristicContentViewSet, TouristicEventViewSet
from geotrek.trekking.management.commands.sync_rando import Command as BaseCommand
# Register mapentity models
from geotrek.tourism import urls # NOQA
class Command(BaseCommand):
    """Extend trekking's ``sync_rando`` command to also export tourism data."""
    def sync_content(self, lang, content):
        # Export the PDF plus every resized picture of the content.
        self.sync_pdf(lang, content)
        for picture, resized in content.resized_pictures:
            self.sync_media_file(lang, resized)
    def sync_event(self, lang, event):
        # Same exports as for contents, applied to a touristic event.
        self.sync_pdf(lang, event)
        for picture, resized in event.resized_pictures:
            self.sync_media_file(lang, resized)
    def sync_tourism(self, lang):
        """Export GeoJSON layers and all published contents/events for ``lang``."""
        self.sync_geojson(lang, TouristicContentViewSet, 'touristiccontents')
        self.sync_geojson(lang, TouristicEventViewSet, 'touristicevents')
        contents = tourism_models.TouristicContent.objects.existing().order_by('pk')
        contents = contents.filter(**{'published_{lang}'.format(lang=lang): True})
        for content in contents:
            self.sync_content(lang, content)
        events = tourism_models.TouristicEvent.objects.existing().order_by('pk')
        events = events.filter(**{'published_{lang}'.format(lang=lang): True})
        for event in events:
            self.sync_event(lang, event)
    def sync(self):
        super(Command, self).sync()
        self.sync_static_file('**', 'tourism/touristicevent.svg')
        self.sync_pictograms('**', tourism_models.InformationDeskType)
        self.sync_pictograms('**', tourism_models.TouristicContentCategory)
        self.sync_pictograms('**', tourism_models.TouristicContentType)
        self.sync_pictograms('**', tourism_models.TouristicEventType)
        for lang in settings.MODELTRANSLATION_LANGUAGES:
            translation.activate(lang)
            self.sync_tourism(lang)
|
delftrobotics/keras-retinanet
|
tests/models/test_densenet.py
|
Python
|
apache-2.0
| 1,587
| 0.00063
|
"""
Copyright 2018 vidosits (https://github.com/vidosits/)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import pytest
import numpy as np
from tensorflow import keras
from keras_retinanet import losses
from keras_retinanet.models.densenet import DenseNetBackbone
parameters = ['densenet121']
@pytest.mark.parametrize("backbone", parameters)
def test_backbone(backbone):
    """Smoke-test a DenseNet backbone: build, compile and fit one batch."""
    # This test is noisy with framework warnings; silence them.
    warnings.simplefilter('ignore')
    num_classes = 10
    inputs = np.zeros((1, 200, 400, 3), dtype=np.float32)
    targets = [
        np.zeros((1, 14814, 5), dtype=np.float32),
        np.zeros((1, 14814, num_classes + 1)),
    ]
    image_input = keras.layers.Input(inputs[0].shape)
    backbone_wrapper = DenseNetBackbone(backbone)
    model = backbone_wrapper.retinanet(num_classes=num_classes, inputs=image_input)
    model.summary()
    # compile model
    model.compile(
        loss={
            'regression': losses.smooth_l1(),
            'classification': losses.focal()
        },
        optimizer=keras.optimizers.Adam(lr=1e-5, clipnorm=0.001))
    model.fit(inputs, targets, batch_size=1)
|
scheib/chromium
|
third_party/protobuf/benchmarks/util/result_parser.py
|
Python
|
bsd-3-clause
| 8,710
| 0.00907
|
# This import depends on the automake rule protoc_middleman, please make sure
# protoc_middleman has been built before run this file.
import json
import re
import os.path
# BEGIN OPENSOURCE
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# END OPENSOURCE
import tmp.benchmarks_pb2 as benchmarks_pb2
__file_size_map = {}
def __get_data_size(filename):
    """Return ``(total_size, average_size)`` of the payloads in a benchmark
    dataset file, caching the result per resolved filename.
    """
    if filename[0] != '/':
        # Relative paths are resolved against the repository root.
        filename = os.path.dirname(os.path.abspath(__file__)) + "/../" + filename
    if filename in __file_size_map:
        return __file_size_map[filename]
    benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
    # Use a context manager so the file handle is closed deterministically
    # (the original left it to the garbage collector).
    with open(filename, "rb") as f:
        benchmark_dataset.ParseFromString(f.read())
    size = 0
    count = 0
    for payload in benchmark_dataset.payload:
        size += len(payload)
        count += 1
    __file_size_map[filename] = (size, 1.0 * size / count)
    return size, 1.0 * size / count
def __extract_file_name(file_name):
    """Return the last path/extension component of *file_name* that starts
    with ``google_message``, or the empty string when none does.
    """
    short_file_name = ""
    for component in re.split(r"[/\.]", file_name):
        if component.startswith("google_message"):
            short_file_name = component
    return short_file_name
__results = []
# CPP results example:
# [
# "benchmarks": [
# {
# "bytes_per_second": int,
# "cpu_time_ns": double,
# "iterations": int,
# "name: string,
# "real_time_ns: double,
# ...
# },
# ...
# ],
# ...
# ]
def __parse_cpp_result(filename):
    """Parse C++ (Google-benchmark) JSON output and append rows to
    ``__results``; throughput is converted from bytes/s to MiB/s.
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        results = json.loads(f.read())
    for entry in results["benchmarks"]:
        name = entry["name"]
        # The part before "_parse_"/"_serialize" names the data file.
        data_filename = re.split("(_parse_|_serialize)", name)[0]
        behavior = name[len(data_filename) + 1:]
        if data_filename[:2] == "BM":
            data_filename = data_filename[3:]
        __results.append({
            "language": "cpp",
            "dataFilename": data_filename,
            "behavior": behavior,
            "throughput": entry["bytes_per_second"] / 2.0 ** 20,
        })
# Synthetic benchmark results example:
# [
# "benchmarks": [
# {
# "cpu_time_ns": double,
# "iterations": int,
# "name: string,
# "real_time_ns: double,
# ...
# },
# ...
# ],
# ...
# ]
def __parse_synthetic_result(filename):
    """Parse synthetic benchmark JSON output into ``__results``.

    Synthetic benchmarks have no data file; throughput is iterations per
    second derived from the CPU time per iteration.
    """
    if filename == "":
        return
    if filename[0] != "/":
        filename = os.path.dirname(os.path.abspath(__file__)) + "/" + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for entry in parsed["benchmarks"]:
        __results.append({
            "language": "cpp",
            "dataFilename": "",
            "behavior": "synthetic",
            "throughput": 10.0 ** 9 / entry["cpu_time_ns"],
        })
# Python results example:
# [
# [
# {
# "filename": string,
# "benchmarks": {
# behavior: results,
# ...
# },
# },
# ...
# ], #pure-python
# ...
# ]
def __parse_python_result(filename):
    """Parse Python benchmark JSON output and append rows to ``__results``."""
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        results_list = json.loads(f.read())
    for results in results_list:
        for result in results:
            # Kept for parity with the other parsers; only the file name
            # itself is used in the rows below.
            _, avg_size = __get_data_size(result["filename"])
            for behavior, throughput in result["benchmarks"].items():
                __results.append({
                    "language": "python",
                    "dataFilename": __extract_file_name(result["filename"]),
                    "behavior": behavior,
                    "throughput": throughput,
                })
# Java results example:
# [
# {
# "id": string,
# "instrumentSpec": {...},
# "measurements": [
# {
# "weight": float,
# "value": {
# "magnitude": float,
# "unit": string
# },
# ...
# },
# ...
# ],
# "run": {...},
# "scenario": {
# "benchmarkSpec": {
# "methodName": string,
# "parameters": {
# defined parameters in the benchmark: parameters value
# },
# ...
# },
# ...
# }
#
# },
# ...
# ]
def __parse_java_result(filename):
    """Parse Caliper JSON output and append rows to ``__results``.

    Throughput (MiB/s) is reconstructed from the weighted average time per
    measurement and the total payload size of the data file.
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        results = json.loads(f.read())
    for result in results:
        total_weight = 0
        total_value = 0
        for measurement in result["measurements"]:
            total_weight += measurement["weight"]
            total_value += measurement["value"]["magnitude"]
        avg_time = total_value * 1.0 / total_weight
        spec = result["scenario"]["benchmarkSpec"]
        total_size, _ = __get_data_size(spec["parameters"]["dataFile"])
        __results.append({
            "language": "java",
            "throughput": total_size / avg_time * 1e9 / 2 ** 20,
            "behavior": spec["methodName"],
            "dataFilename": __extract_file_name(spec["parameters"]["dataFile"]),
        })
# Go b
|
enchmark results:
#
# goos: linux
# goarch: amd64
# Benchmark/.././datasets
|
/google_message2/dataset.google_message2.pb/Unmarshal-12 3000 705784 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Marshal-12 2000 634648 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Size-12 5000 244174 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Clone-12 300 4120954 ns/op
# Benchmark/.././datasets/google_message2/dataset.google_message2.pb/Merge-12 300 4108632 ns/op
# PASS
# ok _/usr/local/google/home/yilunchong/mygit/protobuf/benchmarks 124.173s
def __parse_go_result(filename):
    """Parse ``go test -bench`` text output and append rows to ``__results``.

    Each benchmark line looks like
    ``Benchmark/<path>/<data file>/<behavior>-N  <iters>  <ns/op> ns/op``;
    throughput (MiB/s) is derived from the data-file size and the ns/op.
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    # Open in text mode: the lines are compared against and split with
    # str patterns below, which raises TypeError on bytes under Python 3
    # (the original opened with "rb", which only worked on Python 2).
    with open(filename, "r") as f:
        for line in f:
            result_list = re.split(r"[\ \t]+", line)
            if result_list[0][:9] != "Benchmark":
                continue
            first_slash_index = result_list[0].find('/')
            last_slash_index = result_list[0].rfind('/')
            full_filename = result_list[0][first_slash_index+1:last_slash_index]
            total_bytes, _ = __get_data_size(full_filename)
            # Strip the trailing "-<GOMAXPROCS>" suffix from the behavior.
            behavior_with_suffix = result_list[0][last_slash_index+1:]
            last_dash = behavior_with_suffix.rfind("-")
            if last_dash == -1:
                behavior = behavior_with_suffix
            else:
                behavior = behavior_with_suffix[:last_dash]
            __results.append({
                "dataFilename": __extract_file_name(full_filename),
                "throughput": total_bytes / float(result_list[2]) * 1e9 / 2 ** 20,
                "behavior": behavior,
                "language": "go"
            })
# Self built json results example:
#
# [
# {
# "filename": string,
# "benchmarks": {
# behavior: results,
# ...
# },
# },
# ...
# ]
def __parse_custom_result(filename, language):
    """Parse the self-built JSON result format and append rows to
    ``__results`` tagged with *language*.
    """
    if filename == "":
        return
    if filename[0] != '/':
        filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
    with open(filename, "rb") as f:
        parsed = json.loads(f.read())
    for result in parsed:
        # Kept for parity with the other parsers; the value is unused here.
        _, avg_size = __get_data_size(result["filename"])
        for behavior, throughput in result["benchmarks"].items():
            __results.append({
                "language": language,
                "dataFilename": __extract_file_name(result["filename"]),
                "behavior": behavior,
                "throughput": throughput,
            })
def __parse_js_result(filename, language):
    # JS benchmarks emit the self-built JSON format; reuse that parser.
    return __parse_custom_result(filename, language)
def __parse_php_result(filename, language):
    # PHP benchmarks emit the self-built JSON format; reuse that parser.
    return __parse_custom_result(filename, language)
def get_result_from_file(cpp_file="",
java_file="",
python_file="",
go_file="",
synthetic_file="",
node_file="",
php_c_file="",
php_f
|
reviewboard/reviewboard
|
reviewboard/accounts/backends/ad.py
|
Python
|
mit
| 18,842
| 0
|
"""Active Directory authentication backend."""
import itertools
import logging
import dns
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
try:
import ldap
from ldap.dn import dn2str, str2dn
from ldap.filter import filter_format
except ImportError:
ldap = None
from reviewboard.accounts.backends.base import BaseAuthBackend
from reviewboard.accounts.forms.auth import ActiveDirectorySettingsForm
logger = logging.getLogger(__name__)
class ActiveDirectoryBackend(BaseAuthBackend):
"""Authenticate a user against an Active Directory server.
This is controlled by the following Django settings:
.. setting:: AD_DOMAIN_CONTROLLER
``AD_DOMAIN_CONTROLLER``:
The domain controller (or controllers) to connect to. This must be
a string, but multiple controllers can be specified by separating
each with a space.
This is ``auth_ad_domain_controller`` in the site configuration.
.. setting:: AD_DOMAIN_NAME
``AD_DOMAIN_NAME``:
The Active Directory domain name. This must be a string.
This is ``auth_ad_domain_name`` in the site configuration.
.. setting:: AD_FIND_DC_FROM_DNS
``AD_FIND_DC_FROM_DNS``:
Whether domain controllers should be found by using DNS. This must be
        a boolean.
        This is ``auth_ad_find_dc_from_dns`` in the site configuration.
.. setting:: AD_GROUP_NAME
``AD_GROUP_NAME``:
        The optional name of the group to restrict available users to. This
must be a string.
This is ``auth_ad_group_name`` in the site configuration.
.. setting:: AD_OU_NAME
``AD_OU_NAME``:
The optional name of the Organizational Unit to restrict available users
to. This must be a string.
This is ``auth_ad_ou_name`` in the site configuration.
.. setting:: AD_RECURSION_DEPTH
``AD_RECURSION_DEPTH``:
Maximum depth to recurse when checking group membership. A value of
-1 means infinite depth is supported. A value of 0 turns off recursive
checks.
This is ``auth_ad_recursion_depth`` in the site configuration.
.. setting:: AD_SEARCH_ROOT
``AD_SEARCH_ROOT``:
A custom search root for entries in Active Directory. This must be a
string.
This is ``auth_ad_search_root`` in the site configuration.
.. setting:: AD_USE_TLS
``AD_USE_TLS``:
Whether to use TLS when communicating over LDAP. This must be a
boolean.
This is ``auth_ad_use_tls`` in the site configuration.
"""
backend_id = 'ad'
name = _('Active Directory')
settings_form = ActiveDirectorySettingsForm
login_instructions = \
_('Use your standard Active Directory username and password.')
    def get_domain_name(self):
        """Return the current Active Directory domain name.

        This simply returns the value of :setting:`AD_DOMAIN_NAME`.

        Returns:
            unicode:
            The Active Directory domain name.
        """
        return settings.AD_DOMAIN_NAME
def get_ldap_search_root(self, user_domain=None):
"""Return the search root(s) for users in the LDAP server.
If :setting:`AD_SEARCH_ROOT` is set, then it will be used. Otherwise,
a suitable search root will be computed based on the domain name
(either the provided ``user_domain`` or the result of
:py:meth:`get_domain_name`) and any configured Organizational Unit
name (:setting:`AD_OU_NAME`).
Args:
user_domain (unicode, optional):
An explicit Active Directory domain to use for the search root.
Returns:
unicode:
The search root used to locate users.
"""
if getattr(settings, 'AD_SEARCH_ROOT', None):
return settings.AD_SEARCH_ROOT
dn = []
if settings.AD_OU_NAME:
dn.append([('ou', settings.AD_OU_NAME, None)])
if user_domain is None:
user_domain = self.get_domain_name()
if user_domain:
dn += [
[('dc', dc, None)]
for dc in user_domain.split('.')
]
return dn2str(dn)
def search_ad(self, con, filterstr, user_domain=None):
"""Search the given LDAP server based on the provided filter.
Args:
con (ldap.LDAPObject):
The LDAP connection to search.
filterstr (unicode):
The filter string used to locate objects in Active Directory.
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
list of tuple:
The list of search results. Each tuple in the list is in the form
of ``(dn, attrs)``, where ``dn`` is the Distinguished Name of the
entry and ``attrs`` is a dictionary of attributes for that entry.
"""
search_root = self.get_ldap_search_root(user_domain)
logger.debug('Search root "%s" for filter "%s"',
search_root, filterstr)
return con.search_s(search_root,
scope=ldap.SCOPE_SUBTREE,
filterstr=filterstr)
def find_domain_controllers_from_dns(self, user_domain=None):
"""Find and return the active domain controllers using DNS.
Args:
user_domain (unicode, optional):
An explicit domain used for the search. If not provided,
:py:meth:`get_domain_name` will be used.
Returns:
list of unicode:
The list of domain controllers.
"""
record_name = '_ldap._tcp.%s' % (user_domain or self.get_domain_name())
try:
answer = dns.resolver.query(record_name, 'SRV')
return [
(rdata.port, rdata.target.to_unicode(omit_final_dot=True))
for rdata in sorted(answer,
key=lambda rdata: (rdata.priority,
-rdata.weight))
]
except dns.resolver.NXDOMAIN:
# The domain could not be found. Skip it.
pass
except Exception as e:
logger.error('Unable to query for Active Directory domain '
'controllers using DNS record "%s": %s',
record_name,
e)
return []
def can_recurse(self, depth):
"""Return whether the given recursion depth is too deep.
Args:
depth (int):
The current depth to check.
Returns:
bool:
``True`` if the provided depth can be recursed into. ``False``
if it's too deep.
"""
return (settings.AD_RECURSION_DEPTH == -1 or
depth <= settings.AD_RECURSION_DEPTH)
def get_member_of(self, con, search_results, seen=None, depth=0):
"""Return the LDAP groups for the given users.
This iterates over the users specified in ``search_results`` and
returns a set of groups of which those users are members.
Args:
con (ldap.LDAPObject):
The LDAP connection used for checking groups memberships.
search_results (list of tuple):
The list of search results to check. This expects a result
from :py:meth:`search_ad`.
seen (set, optional):
The set of groups that have already been seen when recursing.
This is used internally by this method and should not be
provided by the caller.
depth (int, optional):
The current recursion depth. This is used internally by this
method and should not be provided by the caller.
Returns:
set:
The group memberships fo
|
kshedstrom/pyroms
|
bathy_smoother/external/lp_solve_5.5/extra/Python/pyhelp.py
|
Python
|
bsd-3-clause
| 381
| 0.023622
|
import os
import sys
# Rewrite setup.py in place so that its `library_dirs` entry points at the
# directory passed as the first command-line argument.
lib_dir = sys.argv[1]
# Read everything first: the same file is reopened for writing below.
# (The original used the Python-2-only `file()` builtin, shadowed the
# `input` builtin, and never closed the handles on error.)
with open("setup.py", 'r') as setupfile:
    lines = setupfile.readlines()
with open("setup.py", 'w') as output:
    for line in lines:
        if line.startswith(" library_dirs"):
            output.write(" library_dirs=['" + str(lib_dir) + "'],\n")
        else:
            output.write(line)
|
xguse/outspline
|
src/outspline/conf/plugins/wxtrayicon.py
|
Python
|
gpl-3.0
| 1,043
| 0
|
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011-2014 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict as OD
# Default configuration for the wxtrayicon plugin: the first mapping holds
# the plugin's own options, the second its sections. Each section value
# appears to be a (defaults, sub-sections) pair — confirm against the
# other conf modules before changing the shape.
data = (
    OD((
        ("enabled", "on"),
    )),
    OD((
        ("GlobalShortcuts", (
            OD((
                ("minimize", "Ctrl+Shift+q"),
            )),
            OD()
        )),
    ))
)
|
TomBaxter/osf.io
|
api_tests/nodes/views/test_node_preprints.py
|
Python
|
apache-2.0
| 6,285
| 0.003819
|
import pytest
from addons.osfstorage.models import OsfStorageFile
from api.base.settings.defaults import API_BASE
from api_tests import utils as test_utils
from api_tests.preprints.filters.test_filters import PreprintsListFilteringMixin
from api_tests.preprints.views.test_preprint_list_mixin import PreprintIsPublishedListMixin, PreprintIsValidListMixin
from framework.auth.core import Auth
from osf.models import PreprintService
from osf_tests.factories import (
PreprintFactory,
AuthUserFactory,
ProjectFactory,
SubjectFactory,
PreprintProviderFactory,
)
from website.util import permissions
class TestNodePreprintsListFiltering(PreprintsListFilteringMixin):
    """Filtering tests for a node's preprints list endpoint.

    All three projects resolve to the same node so the mixin's filters
    are exercised against a single node's preprint list.
    """
    @pytest.fixture()
    def user(self):
        return AuthUserFactory()
    @pytest.fixture()
    def provider_one(self):
        return PreprintProviderFactory(name='Sockarxiv')
    @pytest.fixture()
    def provider_two(self):
        return PreprintProviderFactory(name='Piratearxiv')
    @pytest.fixture()
    def provider_three(self):
        return PreprintProviderFactory(name='Mockarxiv')
    @pytest.fixture()
    def project_one(self, user):
        return ProjectFactory(creator=user)
    @pytest.fixture()
    def project_two(self, project_one):
        return project_one
    @pytest.fixture()
    def project_three(self, project_one):
        return project_one
    @pytest.fixture()
    def url(self, project_one):
        return '/{}nodes/{}/preprints/?version=2.2&'.format(API_BASE, project_one._id)
    def test_provider_filter_equals_returns_one(self, app, user, provider_two, preprint_two, provider_url):
        expected = [preprint_two._id]
        res = app.get('{}{}'.format(provider_url, provider_two._id), auth=user.auth)
        actual = [preprint['id'] for preprint in res.json['data']]
        assert expected == actual
class TestNodePreprintIsPublishedList(PreprintIsPublishedListMixin):
    """Visibility of unpublished preprints on a node's preprints list."""
    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def provider_one(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def provider_two(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def project_published(self, user_admin_contrib):
        return ProjectFactory(creator=user_admin_contrib, is_public=True)
    @pytest.fixture()
    def project_public(self, user_write_contrib, project_published):
        project_published.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        return project_published
    @pytest.fixture()
    def url(self, project_published):
        return '/{}nodes/{}/preprints/?version=2.2&'.format(API_BASE, project_published._id)
    @pytest.fixture()
    def preprint_unpublished(self, user_admin_contrib, provider_one, project_published, subject):
        return PreprintFactory(creator=user_admin_contrib, filename='mgla.pdf', provider=provider_one, subjects=[[subject._id]], project=project_published, is_published=False)
    def test_unpublished_visible_to_admins(self, app, user_admin_contrib, preprint_unpublished, preprint_published, url):
        res = app.get(url, auth=user_admin_contrib.auth)
        assert len(res.json['data']) == 2
        assert preprint_unpublished._id in [d['id'] for d in res.json['data']]
        assert preprint_published._id in [d['id'] for d in res.json['data']]
    def test_unpublished_invisible_to_write_contribs(self, app, user_write_contrib, preprint_unpublished, preprint_published, url):
        res = app.get(url, auth=user_write_contrib.auth)
        assert len(res.json['data']) == 1
        assert preprint_unpublished._id not in [d['id'] for d in res.json['data']]
    def test_filter_published_false_write_contrib(self, app, user_write_contrib, preprint_unpublished, url):
        res = app.get('{}filter[is_published]=false'.format(url), auth=user_write_contrib.auth)
        assert len(res.json['data']) == 0
class TestNodePreprintIsValidList(PreprintIsValidListMixin):
    """Validity checks for preprints listed under a node.

    The overridden tests assert node-level HTTP errors (401/403/410)
    instead of the preprint-level behavior the mixin expects.
    """
    @pytest.fixture()
    def user_admin_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def project(self, user_admin_contrib, user_write_contrib):
        project = ProjectFactory(creator=user_admin_contrib, is_public=True)
        project.add_contributor(user_write_contrib, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
        return project
    @pytest.fixture()
    def provider(self):
        return PreprintProviderFactory()
    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/preprints/?version=2.2&'.format(API_BASE, project._id)
    # test override: custom exception checks because of node permission failures
    def test_preprint_private_invisible_no_auth(self, app, project, preprint, url):
        res = app.get(url)
        assert len(res.json['data']) == 1
        project.is_public = False
        project.save()
        # Private node: anonymous access is rejected outright.
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401
    # test override: custom exception checks because of node permission failures
    def test_preprint_private_invisible_non_contributor(self, app, user_non_contrib, project, preprint, url):
        res = app.get(url, auth=user_non_contrib.auth)
        assert len(res.json['data']) == 1
        project.is_public = False
        project.save()
        res = app.get(url, auth=user_non_contrib.auth, expect_errors=True)
        assert res.status_code == 403
    # test override: custom exception checks because of node permission failures
    def test_preprint_node_deleted_invisible(self, app, user_admin_contrib, user_write_contrib, user_non_contrib, project, preprint, url):
        project.is_deleted = True
        project.save()
        # Deleted node: 410 Gone for every role, authenticated or not.
        # no auth
        res = app.get(url, expect_errors=True)
        assert res.status_code == 410
        # contrib
        res = app.get(url, auth=user_non_contrib.auth, expect_errors=True)
        assert res.status_code == 410
        # write_contrib
        res = app.get(url, auth=user_write_contrib.auth, expect_errors=True)
        assert res.status_code == 410
        # admin
        res = app.get(url, auth=user_admin_contrib.auth, expect_errors=True)
        assert res.status_code == 410
|
WGDEVS/RouterSim
|
RouterSim.py
|
Python
|
gpl-2.0
| 10,354
| 0.016226
|
'''
Router Sim v1.0
Program description: This program simulates a network consisting of routers.
It will be displayed in a command line interface and feature controls
to build and maintain the network by modifying routers, save/load the
network to external files, display information about the routers in
the network and, use pathfinding algorithms to find routes within the
network.
Made by WGDEV, some rights reserved, see license.txt for more info
'''
Main = list() #the database of all routers, keeps track of links to other routers, looks like [[router #,[[other router #, bandwidth],...]]...]
'''
Description: Implements dijkstra's algorithim to find the best path to a target router from an inital router
Parameters:
Router: The initial router's number
Target: The target router's number
Returns:
A string representation of the path to the target from the initial router or an error message
'''
def findPath(Router, Target):
    """Run Dijkstra's algorithm over ``Main`` and describe the best path.

    Args:
        Router: the initial router's number.
        Target: the target router's number.

    Returns:
        A human-readable string: the total delay and path on success, or
        an error message when the start router is missing or no path
        exists.
    """
    global Main
    # q is the priority queue: [delay so far, router #, path string].
    q = [[0,Router,str(Router)]]
    # mapy records the best known delay per router, sorted by router #.
    mapy = [[0,Router]]
    if not binSearch(0,Main,Router)[1]:
        return("Initial router not found!")
    # Sentinel entry: the first loop iteration pops it so expansion
    # starts from the real initial entry.
    q.insert(0,[False,False])
    while (q[0][1] != Target):
        q.pop(0)
        if len(q) <= 0:
            return("No path found!")
        # Skip stale queue entries that are worse than the best known.
        ln = binSearch(1,mapy,q[0][1])
        if ln[1] and mapy[ln[0]][0] < q[0][0]:
            continue
        ln = binSearch(0,Main, q[0][1])
        for i in Main[ln[0]][1]:
            ln2 = binSearch(1,mapy,i[0])
            nc = q[0][0] + i[1]  # candidate delay via the current router
            if not ln2[1]:
                mapy.insert(ln2[0],[nc,i[0]])
            else:
                if mapy[ln2[0]][0] > nc:
                    mapy[ln2[0]][0] = nc
                else:
                    continue
            ln3 = binSearch(0,q,nc)
            if ln3[0] <= 0:
                # Never insert ahead of the entry currently being expanded.
                ln3[0] = 1
            q.insert(ln3[0],[nc,i[0],q[0][2]+"->" +str(i[0])])
    return("Delay is " + str(q[0][0]) + " with a path of " + q[0][2])
'''
Description: Implements a binary search algorithim to find the index of a list with an item at a specified index in a jagged list
Parameters:
Index: The index of the value in each list in the jagged list to compare
List: The jagged list
Value: The value to find
Returns:
A list with two items, the first is the index of the list with the value, the second is if the list is actually in the list
'''
def binSearch(Index, List, Value):
    """Binary-search a jagged list sorted by ``row[Index]``.

    Args:
        Index: position within each row to compare.
        List: jagged list, sorted ascending on that position.
        Value: the value to locate.

    Returns:
        ``[position, found]`` — the row index when found, otherwise the
        insertion point that keeps the list sorted.
    """
    if not List:
        return [0, False]
    if len(List) == 1:
        only = List[0][Index]
        if only == Value:
            return [0, True]
        return [0, False] if only > Value else [1, False]
    lo, hi = 0, len(List) - 1
    # Narrow [lo, hi] until the two candidates are adjacent.
    while hi - lo > 1:
        mid = (lo + hi) // 2
        probe = List[mid][Index]
        if probe == Value:
            return [mid, True]
        if probe > Value:
            hi = mid
        else:
            lo = mid
    for pos in (lo, hi):
        if List[pos][Index] == Value:
            return [pos, True]
    if List[lo][Index] > Value:
        return [lo, False]
    if List[hi][Index] > Value:
        return [hi, False]
    return [hi + 1, False]
'''
Description: Prints all the availabe commands
'''
def showOptions():
    """Print the list of available commands and their effects."""
    help_lines = (
        "Command -> Effect:",
        "save [filename] -> saves the network to the file",
        "load [filename] -> loads the network from the file",
        "tracert [router 1] [router 2] -> finds the path between two routers",
        "link [router 1] [router 2] [delay] -> adds/updates a link between two routers",
        "remove [router 1] [router 2] -> removes the link between two routers",
        "delete [router] -> deletes the router",
        "neighbour [router] -> lists all routers directly linked to the specified router",
        "topology -> lists all routers in the network",
    )
    for help_line in help_lines:
        print(help_line)
'''
Description: Prints all the routers in the network
'''
|
def showRouters():
    """Print a one-line summary for every router in the network."""
    print("Showing all routers in the network:")
    for entry in Main:
        print("Router " + str(entry[0]) + " has " + str(len(entry[1])) + " link(s) to other routers")
'''
Description: Prints all the routers that are directly linked to a router
Parameters:
Router1: The specified router's number
'''
def showRoutes(Router1):
    """Print every router directly linked to ``Router1`` with its delay.

    Args:
        Router1: the specified router's number.
    """
    ln = binSearch(0, Main, Router1)
    if not ln[1]:
        print("Router does not exist!")
        return
    print("Showing neighbour(s) for router " + str(Router1) + ":")
    for link in Main[ln[0]][1]:
        print("Other router is " + str(link[0]) + " with a delay of " + str(link[1]) + ".")
'''
Description: Takes two routers, creates them if they do not exist and, links them or changes the cost if the link already exists
Parameters:
Router1: The first router's number
Router2: The second router's number
Cost: The new cost of the link
Returns: A string represening the number of routers that were created by the function
'''
def addRoute(Router1, Router2, Cost):
    """Link two routers with the given delay, creating them as needed.

    An existing link has its delay updated instead. The link is stored in
    both directions since the network is undirected.

    Args:
        Router1: the first router's number.
        Router2: the second router's number.
        Cost: the delay of the link (must be at least 1).

    Returns:
        A status string, including how many routers were auto-created, or
        an error message for an invalid delay or a self-loop.
    """
    if Cost < 1:
        return "Delay must be at least 1!"
    if Router1 == Router2:
        return "Links cannot loop!"
    global Main
    def _link(source, destination):
        # Ensure `source` exists, then attach/update its link to
        # `destination`; returns 1 when a new router had to be created.
        created = 0
        ln = binSearch(0, Main, source)
        if not ln[1]:
            Main.insert(ln[0], [source, []])
            created = 1
        links = Main[ln[0]][1]
        ln2 = binSearch(0, links, destination)
        if not ln2[1]:
            links.insert(ln2[0], [destination, Cost])
        else:
            links[ln2[0]][1] = Cost
        return created
    outp = _link(Router1, Router2) + _link(Router2, Router1)
    return "Link sucessfully added, with " + str(outp) + " router(s) automatically installed."
'''
Description: Takes two routers, deletes the link between them and, deletes them if they do not have any links left afterwards
Parameters:
Router1: The first router's number
Router2: The second router's number
Returns: A string represening the number of routers that were deleted by the function
'''
def removeRoute(Router1, Router2):
    """Remove the link between two routers.

    Routers left with no remaining links are deleted as well.

    Args:
        Router1: the first router's number.
        Router2: the second router's number.

    Returns:
        A status string, including how many routers were auto-removed, or
        an error message when a router or the link is missing.
    """
    global Main
    def _unlink(source, destination):
        # Drop the `source` -> `destination` link; returns an error
        # string, or the number of routers deleted (the source, when it
        # is left with no links).
        ln1 = binSearch(0, Main, source)
        if not ln1[1]:
            return "One or more specified router(s) do not exist!"
        ln = binSearch(0, Main[ln1[0]][1], destination)
        if not ln[1]:
            return "The link does not exist!"
        Main[ln1[0]][1].pop(ln[0])
        if len(Main[ln1[0]][1]) <= 0:
            Main.pop(ln1[0])
            return 1
        return 0
    outp = 0
    for source, destination in ((Router1, Router2), (Router2, Router1)):
        result = _unlink(source, destination)
        if isinstance(result, str):
            # NOTE: mirrors the original behavior of bailing out
            # mid-removal; the first direction may already be gone.
            return result
        outp += result
    return "Link sucessfully deleted, with " + str(outp) + " router(s) automatically removed."
'''
Description: Deletes a router and any links associated with the router, also automatically deletes any routers with no links
Parameters:
Router1: The specified router's number
Returns: A string represening the number of routers that were deleted by the function
'''
def deleteRoute(Router1):
    """Delete a router, all its links, and any neighbours left with no links.

    Parameters:
        Router1: the router's number
    Returns: a status string including how many other routers were auto-removed.
    """
    global Main
    outp = 0  # number of neighbouring routers removed automatically
    ln = binSearch(0,Main,Router1)
    if (not ln[1]):
        return "Router does not exist!"
    while (len(Main[ln[0]][1]) > 0):
        # Look up the neighbour on the other end of the first remaining link
        # and remove its back-reference to Router1.
        ln1 = binSearch(0,Main,Main[ln[0]][1][0][0])
        ln2 = binSearch(0,Main[ln1[0]][1],Router1)
        Main[ln1[0]][1].pop(ln2[0])
        if len(Main[ln1[0]][1]) <= 0:
            # Neighbour has no links left: remove it as well.
            Main.pop(ln1[0])
            if (ln1[0] < ln[0]):
                # Popping an earlier entry shifts Router1's index down by one.
                ln[0] -= 1
            outp += 1
        Main[ln[0]][1].pop(0)
    Main.pop(ln[0])
    # Fix: "sucessfully" -> "successfully" in the user-facing message.
    return "Router successfully deleted, with " + str(outp) + " other router(s) automatically removed."
'''
Description: Saves the networ
|
p2pu/learning-circles
|
studygroups/migrations/0084_auto_20180215_0747.py
|
Python
|
mit
| 366
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-15 07:47
from django.db import migrations


class Migration(migrations.Migration):
    # Renames the Facilitator model to Profile.
    # Fix: a stray dataset separator line had been spliced into the
    # RenameModel call, breaking the module's syntax.

    dependencies = [
        ('studygroups', '0083_auto_20180209_1210'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Facilitator',
            new_name='Profile',
        ),
    ]
|
|
GeorgiaTechDHLab/TOME
|
topics/migrations/0002_auto_20170308_2245.py
|
Python
|
bsd-3-clause
| 537
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-08 22:45
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    # Orders both topic-rank models by their ``rank`` field.
    # Fix: stray dataset separator lines had split the operations list and
    # the 'ordering' key mid-token, breaking the module's syntax.

    dependencies = [
        ('topics', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='articletopicrank',
            options={'ordering': ('rank',)},
        ),
        migrations.AlterModelOptions(
            name='wordtopicrank',
            options={'ordering': ('rank',)},
        ),
    ]
|
ArcasProject/Arcas
|
tests/test_springer.py
|
Python
|
mit
| 3,670
| 0.007084
|
import arcas
import pandas
def test_setup():
    """The Springer wrapper exposes the expected metadata base URL."""
    expected = 'http://api.springer.com/metadata/pam?q='
    assert arcas.Springer().standard == expected
def test_keys():
    """keys() lists the output dataframe columns in canonical order."""
    expected = ['url', 'key', 'unique_key', 'title', 'author', 'abstract',
                'doi', 'date', 'journal', 'provenance', 'category', 'score',
                'open_access']
    assert arcas.Springer().keys() == expected
def test_parameters_and_url_author():
    """An author search maps to the 'name:' query field."""
    api = arcas.Springer()
    params = api.parameters_fix(author='Glynatsi')
    assert params == ['name:Glynatsi']
    built = api.create_url_search(params)
    assert built == 'http://api.springer.com/metadata/pam?q=name:Glynatsi&api_key=Your key here'
def test_parameters_and_url_title():
    """A title search maps to the 'title:' query field."""
    api = arcas.Springer()
    params = api.parameters_fix(title='Game')
    assert params == ['title:Game']
    built = api.create_url_search(params)
    assert built == 'http://api.springer.com/metadata/pam?q=title:Game&api_key=Your key here'
def test_parameters_and_url_category():
    """A category search maps to the 'subject:' query field.

    Fix: a stray dataset separator line had split the first assert across
    three lines, breaking the module's syntax.
    """
    api = arcas.Springer()
    parameters = api.parameters_fix(category='game theory')
    assert parameters == ['subject:game theory']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=subject:game theory&api_key=Your key here'
def test_parameters_and_url_journal():
    """A journal search maps to the 'pub:' query field.

    Fix: a stray dataset separator line had split the ``parameters``
    assignment mid-identifier, breaking the module's syntax.
    """
    api = arcas.Springer()
    parameters = api.parameters_fix(journal='Springer')
    assert parameters == ['pub:Springer']
    url = api.create_url_search(parameters)
    assert url == 'http://api.springer.com/metadata/pam?q=pub:Springer&api_key=Your key here'
def test_parameters_and_url_record():
    """A record count is passed through as a 'p=' paging term."""
    api = arcas.Springer()
    params = api.parameters_fix(records=1)
    assert params == ['p=1']
    built = api.create_url_search(params)
    assert built == 'http://api.springer.com/metadata/pam?q=p=1&api_key=Your key here'
def test_parameters_and_url_start():
    """A start offset is passed through as an 's=' paging term."""
    api = arcas.Springer()
    params = api.parameters_fix(start=1)
    assert params == ['s=1']
    built = api.create_url_search(params)
    assert built == 'http://api.springer.com/metadata/pam?q=s=1&api_key=Your key here'
def test_create_url_search():
    """Multiple fields are joined with '+AND+' and paging terms appended."""
    api = arcas.Springer()
    params = api.parameters_fix(title='Nash', journal='Spinger', records=2, start=5)
    built = api.create_url_search(params)
    assert built == 'http://api.springer.com/metadata/pam?q=title:Nash+AND+pub:Spinger&p=2&s=5&api_key=Your key here'
def test_to_dataframe():
    """to_dataframe converts one raw API record into a one-row-per-author frame.

    Fix: the fixture dict declared 'openAccess' twice with the same value;
    the duplicate (which Python silently drops) is removed.
    """
    dummy_article = {'identifier': 'doi:10.1000/', 'title': 'Title',
                     'creator': 'E Glynatsi, V Knight', 'publicationName':
                     'Awesome Journal', 'genre': 'ReviewPaper', 'openAccess': 'false',
                     'h1': 'Abstract', 'p': 'Abstract',
                     'doi': '10.1000/', 'publisher': 'Springer',
                     'publicationDate': '2021-01-01', 'url': 'http://dx.doi.org/10.1000/'}
    api = arcas.Springer()
    article = api.to_dataframe(dummy_article)
    assert isinstance(article, pandas.core.frame.DataFrame)
    assert list(article.columns) == api.keys()
    # Two creators -> two rows sharing the same article metadata.
    assert len(article['url']) == 2
    assert article['url'].unique()[0] == 'http://dx.doi.org/10.1000/'
    assert article['key'].unique()[0] == 'Glynatsi2021'
    assert article['title'].unique()[0] == 'Title'
    assert article['abstract'].unique()[0] == 'Abstract'
    assert article['journal'].unique()[0] == 'Awesome Journal'
    assert article['date'].unique()[0] == 2021
    assert article['open_access'].unique()[0] == False
    assert article['score'].unique()[0] == 'Not available'
|
jreback/pandas
|
pandas/tests/series/methods/test_shift.py
|
Python
|
bsd-3-clause
| 13,266
| 0.00098
|
import numpy as np
import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
NaT,
Series,
TimedeltaIndex,
date_range,
offsets,
)
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestShift:
    @pytest.mark.parametrize(
        "ser",
        [
            # NOTE(review): Series([np.arange(5)]) is a one-element object
            # Series holding an array — possibly meant Series(np.arange(5));
            # confirm against upstream intent.
            Series([np.arange(5)]),
            date_range("1/1/2011", periods=24, freq="H"),
            Series(range(5), index=date_range("2017", periods=5)),
        ],
    )
    @pytest.mark.parametrize("shift_size", [0, 1, 2])
    def test_shift_always_copy(self, ser, shift_size):
        """shift() must return a new object even for a no-op shift of 0."""
        # GH22397
        assert ser.shift(shift_size) is not ser
    @pytest.mark.parametrize("move_by_freq", [pd.Timedelta("1D"), pd.Timedelta("1min")])
    def test_datetime_shift_always_copy(self, move_by_freq):
        """A freq-only shift must also return a new object."""
        # GH#22397
        ser = Series(range(5), index=date_range("2017", periods=5))
        assert ser.shift(freq=move_by_freq) is not ser
def test_shift(self, datetime_series):
shifted = datetime_series.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, datetime_series.index)
tm.assert_index_equal(unshifted.index, datetime_series.index)
tm.assert_numpy_array_equal(
unshifted.dropna().values, datetime_series.values[:-1]
)
offset = BDay()
shifted = datetime_series.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
unshifted = datetime_series.shift(0, freq=offset)
tm.assert_series_equal(unshifted, datetime_series)
shifted = datetime_series.shift(1, freq="B")
unshifted = shifted.shift(-1, freq="B")
tm.assert_series_equal(unshifted, datetime_series)
# corner case
unshifted = datetime_series.shift(0)
tm.assert_series_equal(unshifted, datetime_series)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.s
|
hift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index,
|
ps.index)
tm.assert_numpy_array_equal(unshifted.dropna().values, ps.values[:-1])
shifted2 = ps.shift(1, "B")
shifted3 = ps.shift(1, BDay())
tm.assert_series_equal(shifted2, shifted3)
tm.assert_series_equal(ps, shifted2.shift(-1, "B"))
msg = "Given freq D does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.shift(freq="D")
# legacy support
shifted4 = ps.shift(1, freq="B")
tm.assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
tm.assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH#8129
index = date_range("2000-01-01", periods=5)
for dtype in ["int32", "int64"]:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
tm.assert_series_equal(result, expected)
# GH#8260
# with tz
s = Series(
date_range("2000-01-01 09:00:00", periods=5, tz="US/Eastern"), name="foo"
)
result = s - s.shift()
exp = Series(TimedeltaIndex(["NaT"] + ["1 days"] * 4), name="foo")
tm.assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range("2000-01-01 09:00:00", periods=5, tz="CET"), name="foo")
msg = "DatetimeArray subtraction must have the same timezones or no timezones"
with pytest.raises(TypeError, match=msg):
s - s2
    def test_shift2(self):
        """Freq-based shifts move the index, not the data."""
        ts = Series(
            np.random.randn(5), index=date_range("1/1/2000", periods=5, freq="H")
        )
        # Shifting by a finer frequency offsets each timestamp by that amount.
        result = ts.shift(1, freq="5T")
        exp_index = ts.index.shift(1, freq="5T")
        tm.assert_index_equal(result.index, exp_index)
        # GH#1063, multiple of same base
        result = ts.shift(1, freq="4H")
        exp_index = ts.index + offsets.Hour(4)
        tm.assert_index_equal(result.index, exp_index)
        # A positional shift of an index without a freq must raise.
        idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"])
        msg = "Cannot shift with no freq"
        with pytest.raises(NullFrequencyError, match=msg):
            idx.shift(1)
    def test_shift_fill_value(self):
        """fill_value replaces the introduced holes and preserves dtype."""
        # GH#24128
        ts = Series(
            [1.0, 2.0, 3.0, 4.0, 5.0], index=date_range("1/1/2000", periods=5, freq="H")
        )
        exp = Series(
            [0.0, 1.0, 2.0, 3.0, 4.0], index=date_range("1/1/2000", periods=5, freq="H")
        )
        # check that fill value works
        result = ts.shift(1, fill_value=0.0)
        tm.assert_series_equal(result, exp)
        exp = Series(
            [0.0, 0.0, 1.0, 2.0, 3.0], index=date_range("1/1/2000", periods=5, freq="H")
        )
        result = ts.shift(2, fill_value=0.0)
        tm.assert_series_equal(result, exp)
        # An int fill_value on an int Series must not upcast to float.
        ts = Series([1, 2, 3])
        res = ts.shift(2, fill_value=0)
        assert res.dtype == ts.dtype
    def test_shift_categorical_fill_value(self):
        """fill_value must be an existing category; anything else raises."""
        ts = Series(["a", "b", "c", "d"], dtype="category")
        res = ts.shift(1, fill_value="a")
        expected = Series(
            pd.Categorical(
                ["a", "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
            )
        )
        tm.assert_equal(res, expected)
        # check for incorrect fill_value
        msg = "'fill_value=f' is not present in this Categorical's categories"
        with pytest.raises(TypeError, match=msg):
            ts.shift(1, fill_value="f")
    def test_shift_dst(self):
        """Shifting across a DST transition keeps the tz-aware dtype."""
        # GH#13926
        dates = date_range("2016-11-06", freq="H", periods=10, tz="US/Eastern")
        s = Series(dates)
        # A zero shift is an identical copy.
        res = s.shift(0)
        tm.assert_series_equal(res, s)
        assert res.dtype == "datetime64[ns, US/Eastern]"
        res = s.shift(1)
        exp_vals = [NaT] + dates.astype(object).values.tolist()[:9]
        exp = Series(exp_vals)
        tm.assert_series_equal(res, exp)
        assert res.dtype == "datetime64[ns, US/Eastern]"
        res = s.shift(-2)
        exp_vals = dates.astype(object).values.tolist()[2:] + [NaT, NaT]
        exp = Series(exp_vals)
        tm.assert_series_equal(res, exp)
        assert res.dtype == "datetime64[ns, US/Eastern]"
        # Shifting past either end yields all-NaT but keeps the dtype.
        for ex in [10, -10, 20, -20]:
            res = s.shift(ex)
            exp = Series([NaT] * 10, dtype="datetime64[ns, US/Eastern]")
            tm.assert_series_equal(res, exp)
            assert res.dtype == "datetime64[ns, US/Eastern]"
@pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
def test_tshift(self, datetime_series):
# TODO: remove this test when tshift deprecation is enforced
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq="B")
tm.assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
tm.assert_series_equal(shifted, shifted3)
msg = "Given freq M does not match PeriodIndex freq B"
with pytest.raises(ValueError, match=msg):
ps.tshift(freq="M")
# DatetimeIndex
shifted = datetime_series.tshift(1)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(datetime_series, unshifted)
shifted2 = datetime_series.tshift(freq=datetime_series.index.freq)
tm.assert_series_equal(shifted, shifted2)
inferred_ts = Series(
datetime_series.values, Index(np.asarray(datetime_series.index)), name="ts"
)
shifted = inferred_ts.tshift(1)
expected = datetime_series.tshift(1)
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(shifted, expected)
unshifted = shifted.tshift(-1)
tm.assert_series_equal(unshifted, inf
|
diffeo/rejester
|
rejester/_redis.py
|
Python
|
mit
| 2,666
| 0.0015
|
"""Common base class for Redis-based distributed worker systems.
This software is released under an MIT/X11 open source license.
Copyright 2012-2014 Diffeo, Inc.
"""
from __future__ import absolute_import
import logging
import socket
import redis
from rejester.exceptions import ProgrammerError
logger = logging.getLogger(__name__)
class RedisBase(object):
    """Common base class for Redis-based distributed worker systems.

    This class stores common metadata for systems based on the Redis
    in-memory database (http://redis.io/).

    The work being done is identified by two strings, the _application name_
    and the _namespace_. These two strings are concatenated together and
    prepended to most Redis keys by ``_namespace()``. To avoid leaking
    database space, it is important to clean up the namespace, for instance
    with ``delete_namespace()``, when the application is done.

    Fix: stray dataset separator lines had split the class docstring and the
    ``ProgrammerError`` message mid-token, breaking the module's syntax.
    """

    def __init__(self, config):
        """Initialize the registry using a configuration object.

        ``config`` should be a dictionary with the following keys:

        ``registry_addresses``
          list of ``host:port`` for the Redis server(s)
        ``app_name``
          application name (typically fixed, e.g. "rejester")
        ``namespace``
          application invocation namespace name (should be unique per run)
        """
        super(RedisBase, self).__init__()
        self.config = config
        if 'registry_addresses' not in config:
            raise ProgrammerError('registry_addresses not set')
        # Only the first configured server is used.
        redis_address, redis_port = config['registry_addresses'][0].split(':')
        redis_port = int(redis_port)
        self._local_ip = self._ipaddress(redis_address, redis_port)
        if 'app_name' not in config:
            raise ProgrammerError('app_name must be specified to configure Registry')
        self._namespace_str = config['app_name'] + '_' + config['namespace']
        self.pool = redis.ConnectionPool(host=redis_address, port=redis_port, db=0)

    def _ipaddress(self, host, port):
        # Discover the local IP used to reach the Redis server: connecting a
        # UDP socket performs routing without sending any packets.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((host, port))
        local_ip = s.getsockname()[0]
        s.close()
        return local_ip

    def delete_namespace(self):
        '''Remove all keys from the namespace
        '''
        conn = redis.Redis(connection_pool=self.pool)
        keys = conn.keys("%s*" % self._namespace_str)
        # Delete in batches so a huge namespace does not build one giant command.
        for i in xrange(0, len(keys), 10000):
            conn.delete(*keys[i:i+10000])
        logger.debug('tearing down %r', self._namespace_str)

    def _namespace(self, name):
        # Prefix ``name`` with the app/namespace identifier for key isolation.
        return "%s_%s" % (self._namespace_str, name)
|
zaibacu/zTemplate
|
lib/zTemplate.py
|
Python
|
mit
| 2,885
| 0.038475
|
import platform
from copy import *
from ctypes import *
# ctypes mirrors of the C structs used by the native zTemplate library.
# Forward-declared first so the self-referential pointer types can be built,
# then the _fields_ are filled in below.
class Param(Structure): #Forward declaration
    pass
class Value(Structure):
    pass
class StringValue(Structure):
    pass
class BoolValue(Structure):
    pass
class NumberValue(Structure):
    pass
class ListValue(Structure):
    pass
# Pointer aliases used throughout the wrapper.
PARAM_P = POINTER(Param)
VALUE_P = POINTER(Value)
LIST_P = POINTER(ListValue)
# Tagged union: ``type`` selects which concrete *Value struct ``val`` points at
# (1=string, 2=bool, 3=number, 4=list — see zTemplate.handle_type).
Value._fields_ = [
    ("type", c_uint),
    ("val", c_void_p)
]
StringValue._fields_ = [
    ("value", c_char_p)
]
BoolValue._fields_ = [
    ("value", c_bool)
]
NumberValue._fields_ = [
    ("value", c_int)
]
# Singly linked list of Values.
ListValue._fields_ = [
    ("value", VALUE_P),
    ("next", LIST_P)
]
# Singly linked list of key/value parameters passed to the renderer.
Param._fields_ = [
    ("key", c_char_p),
    ("value", VALUE_P),
    ("next", PARAM_P)
]
class zTemplate(object):
    """ctypes wrapper around the native zTemplate rendering library.

    Fixes: stray dataset separator lines had split ``p.key`` and ``cursor``
    assignments mid-token in construct_params; the second ``argtype``
    assignment in __init__ targeted ``render`` twice instead of
    ``render_text`` (copy-paste).
    """

    def __init__(self):
        # The shared library ships under bin/ with a platform-specific suffix.
        if platform.system() == "Windows":
            self.lib = cdll.LoadLibrary("bin/zTemplate.dll")
        else:
            self.lib = cdll.LoadLibrary("bin/zTemplate.so")
        self.lib.render.restype = c_char_p
        self.lib.render.argtype = [c_char_p, PARAM_P]
        self.lib.render_text.restype = c_char_p
        # Fix: was self.lib.render.argtype (set twice); render_text's was never set.
        # NOTE(review): ctypes spells this ``argtypes``; ``argtype`` is inert —
        # confirm whether argument checking was intended.
        self.lib.render_text.argtype = [c_char_p, PARAM_P]

    def handle_type(self, value):
        """Convert a Python value into a Value struct understood by the C side.

        Supported: list (type 4), str (1), bool (2), int (3). Dicts are
        flattened by construct_params before reaching here.
        """
        v = Value()
        if type(value) == list:
            v.type = 4
            # Build the C linked list back-to-front so each node can point
            # at the one built before it.
            rev = value[:]
            rev.reverse()
            prev_item = None
            for item in rev:
                lv = ListValue()
                self.Values.append(lv)
                lv.value = VALUE_P(self.handle_type(item))
                if prev_item is not None:
                    lv.next = LIST_P(prev_item)
                prev_item = lv
            # NOTE(review): an empty list leaves ``lv`` unbound and raises
            # NameError here — confirm whether empty lists can ever be passed.
            v.val = cast(byref(lv), c_void_p)
        elif type(value) == dict:
            pass
        elif type(value) == str:
            sv = StringValue()
            sv.value = value.encode("UTF-8")
            self.Values.append(sv)
            v.type = 1
            v.val = cast(byref(sv), c_void_p)
        elif type(value) == bool:
            bv = BoolValue()
            bv.value = value
            self.Values.append(bv)
            v.type = 2
            v.val = cast(byref(bv), c_void_p)
        elif type(value) == int:
            nv = NumberValue()
            nv.value = value
            self.Values.append(nv)
            v.type = 3
            v.val = cast(byref(nv), c_void_p)
        else:
            print("Unhandled type %s" % type(value))
        return v

    def render(self, file, params = {}):
        """Render a template file with the given parameter dict."""
        root = self.construct_params(params)
        return self.lib.render(file.encode("UTF-8"), byref(root))

    def render_text(self, text, params = {}):
        """Render a template given directly as text."""
        root = self.construct_params(params)
        return self.lib.render_text(text.encode("UTF-8"), byref(root))

    def construct_params(self, params):
        """Build the C linked list of Param structs from a Python dict."""
        root = Param()
        cursor = root
        self.Values = [] #Just to keep our value structs not destroyed
        for key, value in params.items():
            if type(value) == dict:
                # Nested dicts become "outer->inner" composite keys.
                for name, member in value.items():
                    p = Param()
                    p.key = ("%s->%s" % (key, name)).encode("UTF-8")
                    v = self.handle_type(member)
                    p.value = VALUE_P(v)
                    cursor.next = PARAM_P(p)
                    cursor = p
            else:
                p = Param()
                p.key = key.encode("UTF-8")
                v = self.handle_type(value)
                p.value = VALUE_P(v)
                cursor.next = PARAM_P(p)
                cursor = p
        return root
|
MrTrustworthy/traveler
|
tests/test_poller.py
|
Python
|
mit
| 340
| 0.002941
|
# Fix: stray dataset separator lines had split ``import unittest`` and the
# ``def test_poller(self):`` header mid-token, breaking the module's syntax.
import unittest

import traveler


class MainTest(unittest.TestCase):
    """Smoke tests for the traveler Poller."""

    def test_base(self):
        # Sanity check that the test harness itself runs.
        self.assertTrue(True)

    def setUp(self):
        self.poller = traveler.Poller()

    def test_poller(self):
        # load() is expected to return raw text.
        j = self.poller.load()
        self.assertTrue(isinstance(j, str))


if __name__ == '__main__':
    unittest.main()
|
kaday/rose
|
lib/python/rose/macros/trigger.py
|
Python
|
gpl-3.0
| 24,332
| 0
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import copy
import os
import rose.config
import rose.config_tree
import rose.env
import rose.macro
import rose.macros.rule
import rose.resource
class TriggerMacro(rose.macro.MacroBaseRoseEdit):
"""Class to load and check trigger dependencies."""
ERROR_BAD_EXPR = "Invalid trigger expression: {0}"
ERROR_BAD_STATE = "State should be {0}"
ERROR_CYCLIC = 'Cyclic dependency detected: {0} to {1}'
ERROR_DUPL_TRIG = "Badly defined trigger - {0} is 'duplicate'"
ERROR_MISSING_METADATA = 'No metadata entry found'
WARNING_STATE_CHANGED = '{0} -> {1}'
IGNORED_STATUS_PARENT = 'from state of parent: {0}'
IGNORED_STATUS_VALUE = ('from parent value: {0} '
'is not {2} ({1})')
IGNORED_STATUS_VALUES = ('from parent value: {0} with {1} '
'is not in the allowed values: {2}')
PARENT_VALUE = 'value {0}'
_evaluated_rule_checks = {}
MAX_STORED_RUL
|
E_CHECKS = 10000
def _setup_triggers(self, meta_config):
self.trigger_family_lookup = {}
self._id_is_duplicate = {} # Speedup dictionary.
self
|
.enabled_dict = {}
self.evaluator = rose.macros.rule.RuleEvaluator()
self.rec_rule = rose.macros.rule.REC_EXPR_IS_THIS_RULE
for setting_id, sect_node in meta_config.value.items():
if sect_node.is_ignored():
continue
opt_node = sect_node.get([rose.META_PROP_TRIGGER], no_ignore=True)
if opt_node is not None:
expr = opt_node.value
id_value_dict = rose.variable.parse_trigger_expression(expr)
for trig_id, values in id_value_dict.items():
if values == []:
id_value_dict.update({trig_id: [None]})
self.trigger_family_lookup.update({setting_id: id_value_dict})
self._trigger_involved_ids = self.get_all_ids()
    def transform(self, config, meta_config=None):
        """Apply metadata trigger expressions to variables."""
        self.reports = []
        meta_config = self._load_meta_config(config, meta_config)
        self._setup_triggers(meta_config)
        self.enabled_dict = {}
        self.ignored_dict = {}
        enabled = rose.config.ConfigNode.STATE_NORMAL
        trig_ignored = rose.config.ConfigNode.STATE_SYST_IGNORED
        user_ignored = rose.config.ConfigNode.STATE_USER_IGNORED
        state_map = {enabled: 'enabled ',
                     trig_ignored: 'trig-ignored',
                     user_ignored: 'user-ignored'}
        # NOTE(review): change_list is never used below — confirm it is dead.
        change_list = []
        id_list = []
        # Record every setting id and which ones start out ignored.
        prev_ignoreds = {trig_ignored: [], user_ignored: []}
        for keylist, node in config.walk():
            if len(keylist) == 1:
                n_id = keylist[0]
            else:
                n_id = self._get_id_from_section_option(*keylist)
            id_list.append(n_id)
            if node.state in prev_ignoreds:
                prev_ignoreds[node.state].append(n_id)
        # Propagate trigger state from every trigger parent.
        for var_id in self.trigger_family_lookup:
            self.update(var_id, config, meta_config)
        # Apply the computed states and report every state change.
        for var_id in id_list:
            section, option = self._get_section_option_from_id(var_id)
            node = config.get([section, option])
            old, new = None, None
            if var_id in self.ignored_dict:
                node.state = trig_ignored
                if not any([var_id in v for k, v in prev_ignoreds.items()]):
                    old, new = state_map[enabled], state_map[trig_ignored]
            elif var_id in prev_ignoreds[trig_ignored]:
                node.state = enabled
                old, new = state_map[trig_ignored], state_map[enabled]
            elif (var_id in prev_ignoreds[user_ignored] and
                    var_id in self._trigger_involved_ids):
                node.state = enabled
                old, new = state_map[user_ignored], state_map[enabled]
            if old != new:
                info = self.WARNING_STATE_CHANGED.format(old, new)
                if option is None:
                    value = None
                else:
                    value = node.value
                self.add_report(section, option, value, info)
        return config, self.reports
def update(self, var_id, config_data, meta_config):
"""Update enabled and ignored ids starting with var_id.
var_id - a setting id to start the triggering update at.
config_data - a rose.config.ConfigNode or a dictionary that
looks like this:
{"sections":
{"namelist:foo": rose.section.Section instance,
"env": rose.section.Section instance},
"variables":
{"namelist:foo": [rose.variable.Variable instance,
rose.variable.Variable instance],
"env": [rose.variable.Variable instance]
}
}
meta_config - a rose.config.ConfigNode.
only_these_sections (default None) - a list of sections to
examine. If specified, checking for other sections will be
skipped.
"""
has_ignored_parent = True
config_sections = self._get_config_sections(config_data)
config_sections_duplicate_map = self._get_duplicate_config_sections(
config_data, config_sections=config_sections)
start_ids = [var_id]
alt_ids = self._get_id_duplicates(
var_id, config_data, meta_config,
config_sections_duplicate_map=config_sections_duplicate_map
)
if alt_ids:
start_ids = alt_ids
id_stack = []
for start_id in start_ids:
if (start_id in self.enabled_dict and
start_id not in self.ignored_dict):
has_ignored_parent = False
if not sum([start_id in v for v in
self.trigger_family_lookup.values()]):
has_ignored_parent = False
section, option = self._get_section_option_from_id(start_id)
is_node_present = self._get_config_has_id(config_data, start_id)
if section in self.ignored_dict and option is not None:
has_ignored_parent = True
has_ignored_parent = has_ignored_parent or not is_node_present
id_stack.append((start_id, has_ignored_parent))
update_id_list = []
while id_stack:
this_id, has_ignored_parent = id_stack[0]
alt_ids = self._get_id_duplicates(
this_id, config_data, meta_config,
config_sections_duplicate_map=config_sections_duplicate_map
)
if alt_ids:
this_id = alt_ids.pop(0)
for alt_id in alt_ids:
id_stack.insert(1, (alt_id, has_ignored_parent))
is_duplicate = self._check_is_id_dupl(this_id, meta_config)
# Triggered sections need their options to trigger sub children.
if this_id in config_sections:
options = []
for option in self._get_config_section_options(config_data,
this_id):
skip_id = self._get_id_from_section_option(
this_id, option)
|
waxe/waxe-image
|
waxe_image/scripts/get_ng_build.py
|
Python
|
mit
| 1,483
| 0
|
import io
import os
import requests
import shutil
import sys
import zipfile
from waxe_image import __version__
API_RELEASES_URL = 'https://api.github.com/repos/waxe/waxe-image/releases'
NG_BUILD_FOLDER = 'website'
def main(argv=sys.argv):
    """Download and unpack the Angular build matching the current release.

    Optional argv[1] overrides the target folder (default: ``website``).
    Fixes: ``io.StringIO(r.content)`` — ``Response.content`` is bytes, so
    ``zipfile.ZipFile`` needs ``io.BytesIO``; also removes stray dataset
    separator lines that had split two statements mid-token.
    """
    if len(argv) > 2:
        print('Too many arguments')
        sys.exit(1)

    global NG_BUILD_FOLDER
    if len(argv) == 2:
        NG_BUILD_FOLDER = argv[1]

    # Start from a clean target folder.
    if os.path.isdir(NG_BUILD_FOLDER):
        shutil.rmtree(NG_BUILD_FOLDER)
    if os.path.exists(NG_BUILD_FOLDER):
        # Still present: it is a file or could not be removed.
        print('There is an issue with the folder %s' % NG_BUILD_FOLDER)
        sys.exit(1)

    r = requests.get(API_RELEASES_URL)
    if r.status_code != 200:
        raise ValueError('Bad status code %s' % r.status_code)
    releases = r.json()

    # Find the GitHub release whose tag matches this package version.
    release = None
    for rel in releases:
        if rel['tag_name'] == __version__:
            release = rel
            break
    if not release:
        raise Exception('No release found for the current version %s' %
                        __version__)

    # Locate the prebuilt Angular bundle among the release assets.
    ng_asset = None
    for asset in release['assets']:
        if 'waxe-image-ng.zip' in asset['browser_download_url']:
            ng_asset = asset
            break
    assert(ng_asset)

    url = ng_asset['browser_download_url']
    r = requests.get(url, stream=True)
    if r.status_code != 200:
        raise ValueError('Bad status code %s' % r.status_code)
    # Response.content is bytes: wrap in BytesIO (StringIO would raise TypeError).
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(NG_BUILD_FOLDER)
|
relekang/python-semantic-release
|
tests/mocks/mock_gitlab.py
|
Python
|
mit
| 4,092
| 0.000489
|
import gitlab
from .. import mock, wrapped_config_get
gitlab.Gitlab("") # instantiation necessary to discover gitlab ProjectManager
class _GitlabProject:
    """Stand-in for a python-gitlab Project with canned commits/tags/releases.

    Fix: a stray dataset separator line had split the "bad_job" dict in the
    "failure" branch mid-token, breaking the module's syntax.
    """

    def __init__(self, status):
        self.commits = {"my_ref": self._Commit(status)}
        self.tags = self._Tags()
        self.releases = self._Releases()

    class _Commit:
        def __init__(self, status):
            self.statuses = self._Statuses(status)

        class _Statuses:
            # Canned CI job listings keyed by the requested overall status.
            def __init__(self, status):
                if status == "pending":
                    self.jobs = [
                        {
                            "name": "good_job",
                            "status": "passed",
                            "allow_failure": False,
                        },
                        {
                            "name": "slow_job",
                            "status": "pending",
                            "allow_failure": False,
                        },
                    ]
                elif status == "failure":
                    self.jobs = [
                        {
                            "name": "good_job",
                            "status": "passed",
                            "allow_failure": False,
                        },
                        {"name": "bad_job", "status": "failed", "allow_failure": False},
                    ]
                elif status == "allow_failure":
                    self.jobs = [
                        {
                            "name": "notsobad_job",
                            "status": "failed",
                            "allow_failure": True,
                        },
                        {
                            "name": "good_job2",
                            "status": "passed",
                            "allow_failure": False,
                        },
                    ]
                elif status == "success":
                    self.jobs = [
                        {
                            "name": "good_job1",
                            "status": "passed",
                            "allow_failure": True,
                        },
                        {
                            "name": "good_job2",
                            "status": "passed",
                            "allow_failure": False,
                        },
                    ]

            def list(self):
                return self.jobs

    class _Tags:
        def __init__(self):
            pass

        def get(self, version):
            # Only the two known tags exist; anything else 404s.
            if version == "vmy_good_tag":
                return self._Tag()
            elif version == "vmy_locked_tag":
                return self._Tag(locked=True)
            else:
                raise gitlab.exceptions.GitlabGetError

        class _Tag:
            def __init__(self, locked=False):
                self.locked = locked

            def set_release_description(self, changelog):
                # A locked tag rejects updates, mirroring GitLab behaviour.
                if self.locked:
                    raise gitlab.exceptions.GitlabUpdateError

    class _Releases:
        def __init__(self):
            pass

        def create(self, input):
            # Accept only well-formed payloads naming a known tag.
            if input["name"] and input["tag_name"]:
                if (
                    input["tag_name"] == "vmy_good_tag"
                    or input["tag_name"] == "vmy_locked_tag"
                ):
                    return self._Release()
            raise gitlab.exceptions.GitlabCreateError

        class _Release:
            def __init__(self, locked=False):
                pass
def mock_gitlab(status="success"):
    """Build a decorator that patches env, config, GitLab auth and project
    lookup so tests run against ``_GitlabProject(status)``.
    """
    mocks = [
        mock.patch("os.environ", {"GL_TOKEN": "token"}),
        mock.patch(
            "semantic_release.hvcs.config.get", wrapped_config_get(hvcs="gitlab")
        ),
        mock.patch("gitlab.Gitlab.auth"),
        mock.patch(
            "gitlab.v4.objects.ProjectManager",
            return_value={"owner/repo": _GitlabProject(status)},
        ),
    ]
    def wraps(func):
        # Apply in reverse so the first mock in the list is outermost,
        # matching stacked-decorator order.
        for option in reversed(mocks):
            func = option(func)
        return func
    return wraps
|
chaosim/dao
|
dao/base.py
|
Python
|
gpl-3.0
| 113
| 0.035398
|
# -*- coding: utf-8 -*-
# Fix: stray dataset separator lines had split ``classeq`` and the
# ``Element`` class header mid-token, breaking the module's syntax.

def classeq(x, y):
    """Return True when x and y are instances of exactly the same class."""
    return x.__class__ == y.__class__


class Element(object):
    """Common base class for dao objects."""
    pass
|
avtomato/HackerRank
|
Python/_03_Strings/_09_Designer_Door_Mat/solution.py
|
Python
|
mit
| 333
| 0.006006
|
# Designer Door Mat: read rows N and columns M from stdin, print the mat.
# More than 6 lines of code will result in 0 score. Blank lines are not counted.
# Fix: stray dataset separator lines had split the first comment and the
# final print statement, breaking the script's syntax.
N, M = map(int, input().split())
for i in range(1, N, 2):
    print(('.|.' * i).center(M, '-'))  # widening top half of the pattern
print('WELCOME'.center(M, '-'))
for i in range(N - 2, -1, -2):
    print(('.|.' * i).center(M, '-'))  # mirrored narrowing bottom half
|
redsolution/redsolution-cms
|
redsolutioncms/templatetags/redsolutioncms_tags.py
|
Python
|
gpl-3.0
| 1,289
| 0.006206
|
from django import template
from django.template import TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, \
BLOCK_TAG_START, VARIABLE_TAG_START, VARIABLE_TAG_END, BLOCK_TAG_END
register = template.Library()
class RawNode(template.Node):
    """Template node that emits pre-captured template source verbatim.

    Fix: a stray dataset separator line had split ``return self.data``
    mid-token, breaking the module's syntax.
    """

    def __init__(self, data):
        self.data = data

    def render(self, context):
        # The captured source is returned untouched; the context is ignored.
        return self.data
@register.tag
def raw(parser, token):
    """
    Render as just text everything between ``{% raw %}`` and ``{% endraw %}``.

    Fix: a stray dataset separator line had split the ``ENDRAW`` constant
    mid-token, breaking the module's syntax.
    """
    ENDRAW = 'endraw'
    data = u''
    while parser.tokens:
        token = parser.next_token()
        if token.token_type == TOKEN_BLOCK and token.contents == ENDRAW:
            return RawNode(data)
        # Re-serialize each token back into its literal template source.
        if token.token_type == TOKEN_VAR:
            data += '%s %s %s' % (VARIABLE_TAG_START, token.contents, VARIABLE_TAG_END)
        elif token.token_type == TOKEN_BLOCK:
            data += '%s %s %s' % (BLOCK_TAG_START, token.contents, BLOCK_TAG_END)
        elif token.token_type == TOKEN_COMMENT:
            pass  # django.template doesn't preserve comment contents
        elif token.token_type == TOKEN_TEXT:
            data += token.contents
    # Ran out of tokens without seeing {% endraw %}.
    parser.unclosed_block_tag([ENDRAW])
@register.simple_tag
def start_block():
    # Emit a literal block-tag opener so templates can output "{%" itself.
    return u'{%'
@register.simple_tag
def end_block():
    # Emit a literal block-tag closer so templates can output "%}" itself.
    return u'%}'
|
itucsdb1509/itucsdb1509
|
clubs.py
|
Python
|
gpl-3.0
| 8,354
| 0.003953
|
import datetime
import os
import json
import re
import psycopg2 as dbapi2
from flask import Flask
from flask import redirect
from flask import request
from flask import render_template
from flask.helpers import url_for
from store import Store
from fixture import *
from sponsors import *
from curlers import *
from clubs import *
from psycopg2.tests import dbapi20
class Clubs:
    """Value object mirroring one row of the CLUBS table."""
    def __init__(self, name, place, year, chair, number_of_members, rewardnumber):
        self.name = name
        self.place = place  # COUNTRIES foreign-key id (PLACES column)
        self.year = year
        self.chair = chair
        self.number_of_members = number_of_members
        self.rewardnumber = rewardnumber
def init_clubs_db(cursor):
    # Create the CLUBS table (PLACES references COUNTRIES) and seed it with
    # the sample rows from add_test_data.
    cursor.execute( """CREATE TABLE IF NOT EXISTS CLUBS (
                    ID SERIAL,
                    NAME VARCHAR(80) NOT NULL,
                    PLACES INTEGER NOT NULL REFERENCES COUNTRIES(COUNTRY_ID) ON DELETE CASCADE ON UPDATE CASCADE,
                    YEAR NUMERIC(4) NOT NULL,
                    CHAIR VARCHAR(80) NOT NULL,
                    NUMBER_OF_MEMBERS INTEGER NOT NULL,
                    REWARDNUMBER INTEGER,
                    PRIMARY KEY(ID)
                    )""")
    add_test_data(cursor)
def add_test_data(cursor):
    """Seed the CLUBS table with four sample rows.

    Fix: stray dataset separator lines had split the ``def`` header and the
    first column list mid-token, breaking the module's syntax.
    """
    cursor.execute("""
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Orlando Curling Club',
            1,
            2014,
            'Bryan Pittard',
            '7865',
            '0');
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Wausau Curling Club',
            1,
            1896,
            'Jennie Moran',
            '54403',
            '11');
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Fenerbahçe',
            3,
            2011,
            'Aziz Yıldırım',
            '9002',
            '1');
        INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            'Galatasaray',
            3,
            2000,
            'Dursun Aydın Ozbek',
            '17864',
            '5'
        )""")
def add_club(app, request, club):
    """Insert ``club`` as a new row in CLUBS.

    Fixes: ``cursor.rollback()`` — DB-API cursors have no rollback(), the
    connection does; also removes a duplicate ``connection.cursor()`` call.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("""INSERT INTO CLUBS
            (NAME, PLACES, YEAR, CHAIR, NUMBER_OF_MEMBERS, REWARDNUMBER) VALUES (
            %s,
            %s,
            %s,
            %s,
            %s,
            %s
            )""", (club.name, club.place, club.year,
                   club.chair, club.number_of_members, club.rewardnumber))
        except dbapi2.Error as e:
            print(e.pgerror)
            connection.rollback()  # was cursor.rollback()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.commit()
        connection.close()
def delete_club(app, id):
    """Delete the club row identified by ``id``.

    Fix: ``cursor.rollback()`` — DB-API cursors have no rollback(); the
    rollback belongs to the connection.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('DELETE FROM CLUBS WHERE ID = %s', (id,))
        except dbapi2.Error as e:
            print(e.pgerror)
            connection.rollback()  # was cursor.rollback()
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.commit()
        connection.close()
def get_clubs_page(app):
    """Handle the clubs page: list (GET), add, delete and search (POST)."""
    if request.method == 'GET':
        now = datetime.datetime.now()
        clubs = get_all_clubs(app)
        countries = get_country_names(app)
        return render_template('clubs.html',
                               clubs=clubs, countries=countries,
                               current_time=now.ctime())
    elif "add" in request.form:
        # Build a Clubs value object straight from the form fields.
        club = Clubs(request.form['name'],
                     request.form['place'],
                     request.form['year'],
                     request.form['chair'],
                     request.form['number_of_members'],
                     request.form['rewardnumber'])
        add_club(app, request, club)
        return redirect(url_for('clubs_page'))
    elif "delete" in request.form:
        # Checkbox field names embed the club id after a 9-char prefix
        # (presumably "checkbox_") — TODO confirm against the template.
        for line in request.form:
            if "checkbox" in line:
                delete_club(app, int(line[9:]))
        return redirect(url_for('clubs_page'))
    elif 'search' in request.form:
        clubs = search_club(app, request.form['club_to_search'])
        return render_template('clubs_search_page.html', clubs = clubs)
def get_clubs_edit_page(app, club_id):
    """Show the edit form for one club (GET) or persist the submitted edits (POST)."""
    if request.method == 'GET':
        return render_template(
            'clubs_edit_page.html',
            current_time=datetime.datetime.now().ctime(),
            club=get_club(app, club_id),
            countries=get_country_names(app))
    if request.method == 'POST':
        edited = Clubs(request.form['name'],
                       request.form['place'],
                       request.form['year'],
                       request.form['chair'],
                       request.form['number_of_members'],
                       request.form['rewardnumber'])
        update_club(app, request.form['id'], edited)
        return redirect(url_for('clubs_page'))
def get_country_names(app):
    """Return all (COUNTRY_ID, COUNTRY_NAME) rows, for populating selects.

    Fixes vs. original: ``countries`` was unbound when the query failed, so
    the ``return`` raised NameError and masked the real error — it is now
    initialized to [] and an empty list is returned on failure. The bare
    outer ``except:`` is gone; a SELECT needs no rollback.
    """
    countries = []
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('SELECT COUNTRY_ID,COUNTRY_NAME FROM COUNTRIES')
            countries = cursor.fetchall()
        except dbapi2.Error as e:
            print(e.pgerror)
        finally:
            cursor.close()
    finally:
        connection.close()
    return countries
def get_club(app, club_id):
    """Fetch a single club row (joined with its country name) by primary key.

    Returns the row tuple, or None when the club does not exist or the
    query fails. Fixes vs. original: DB-API parameters must be a sequence —
    the bare ``club_id`` scalar is now wrapped in a 1-tuple; the
    non-existent ``cursor.rollback()`` is replaced by the connection-level
    rollback; the stray trailing semicolon is removed.
    """
    club = None
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('''
                SELECT C.ID, C.NAME, S.COUNTRY_NAME, C.YEAR, C.CHAIR,
                       C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
                FROM CLUBS AS C, COUNTRIES AS S
                WHERE (
                    C.ID = %s AND C.PLACES = S.COUNTRY_ID
                )
                ''', (club_id,))  # BUG fix: parameters must be a sequence
            club = cursor.fetchone()
        except dbapi2.Error as e:
            print(e.pgerror)
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return club
def update_club(app, id, club):
    """Overwrite every editable column of club row *id* with values from *club*.

    Fixes vs. original: ``cursor.rollback()`` does not exist (the
    AttributeError it raised masked the real DB error) — roll back the
    connection instead; catch ``dbapi2.Error`` rather than everything;
    commit only after a successful execute instead of unconditionally in
    ``finally``.
    """
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("""
                UPDATE CLUBS
                SET NAME = %s,
                    PLACES = %s,
                    YEAR = %s,
                    CHAIR = %s,
                    NUMBER_OF_MEMBERS = %s,
                    REWARDNUMBER = %s
                WHERE ID = %s
                """, (club.name, club.place, club.year,
                      club.chair, club.number_of_members,
                      club.rewardnumber, id))
            connection.commit()  # commit only on success
        except dbapi2.Error as e:
            print(e.pgerror)
            connection.rollback()  # BUG fix: was cursor.rollback()
        finally:
            cursor.close()
    finally:
        connection.close()
def get_all_clubs(app):
    """Return every club row joined with its country name.

    Returns [] on failure. Fixes vs. original: removed the leftover
    ``print(1)`` debug statement; ``clubs`` was unbound when the query
    failed, so the ``return`` raised NameError — it is now initialized;
    the inner bare ``except:`` with the non-existent ``cursor.rollback()``
    is replaced by a ``dbapi2.Error`` handler.
    """
    clubs = []
    connection = dbapi2.connect(app.config['dsn'])
    try:
        cursor = connection.cursor()
        try:
            cursor.execute('''
                SELECT C.ID, C.NAME, K.COUNTRY_NAME, C.YEAR, C.CHAIR,
                       C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
                FROM CLUBS AS C, COUNTRIES AS K
                WHERE C.PLACES = K.COUNTRY_ID
                ''')
            clubs = cursor.fetchall()
        except dbapi2.Error as e:
            print(e.pgerror)
        finally:
            cursor.close()
    except dbapi2.Error as e:
        print(e.pgerror)
        connection.rollback()
    finally:
        connection.close()
    return clubs
def search_club(app, name):
connection = dbapi2.connect(app.config['dsn'])
try:
cursor = connection.cursor()
try:
cursor.execute("""
SELECT C.ID, C.NAME, S.COUNTRY_NAME, C.YEAR, C.CHAIR, C.NUMBER_OF_MEMBERS, C.REWARDNUMBER
FROM CLUBS AS C , COUNTRIES AS S
WHERE(
UPPER(C.NAME)=UPPER(%s) AND
C.PLACES=S.COUNTRY_ID
)""", (name,))
clubs = cursor.fetchall()
except dbapi2.Error as e:
print(e.pgerror)
finally:
|
google/trax
|
trax/shapes_test.py
|
Python
|
apache-2.0
| 2,632
| 0.00304
|
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trax.shapes."""
from absl.testing import absltest
import numpy as np
from trax impo
|
rt shapes
from trax.shapes import ShapeDtype
|
class ShapesTest(absltest.TestCase):
    """Unit tests for trax.shapes: ShapeDtype and the module-level helpers."""

    def test_constructor_and_read_properties(self):
        # An explicitly supplied dtype is stored and read back verbatim.
        sd = ShapeDtype((2, 3), np.int32)
        self.assertEqual(sd.shape, (2, 3))
        self.assertEqual(sd.dtype, np.int32)

    def test_default_dtype_is_float32(self):
        # Omitting dtype must default to float32.
        sd = ShapeDtype((2, 3))
        self.assertEqual(sd.shape, (2, 3))
        self.assertEqual(sd.dtype, np.float32)

    def test_signature_on_ndarray(self):
        # signature() must mirror a concrete ndarray's shape and dtype.
        array = np.array([[2, 3, 5, 7],
                          [11, 13, 17, 19]],
                         dtype=np.int16)
        sd = shapes.signature(array)
        self.assertEqual(sd.shape, (2, 4))
        self.assertEqual(sd.dtype, np.int16)

    def test_shape_dtype_repr(self):
        # The repr format is part of the public contract; pin it exactly.
        sd = ShapeDtype((2, 3))
        repr_string = '{}'.format(sd)
        self.assertEqual(repr_string,
                         "ShapeDtype{shape:(2, 3), dtype:<class 'numpy.float32'>}")

    def test_splice_signatures(self):
        sd1 = ShapeDtype((1,))
        sd2 = ShapeDtype((2,))
        sd3 = ShapeDtype((3,))
        sd4 = ShapeDtype((4,))
        sd5 = ShapeDtype((5,))
        # Signatures can be ShapeDtype instances, tuples of 2+ ShapeDtype instances,
        # or empty tuples.
        sig1 = sd1
        sig2 = (sd2, sd3, sd4)
        sig3 = ()
        sig4 = sd5
        # Splicing flattens the mix into one flat tuple, dropping empties.
        spliced = shapes.splice_signatures(sig1, sig2, sig3, sig4)
        self.assertEqual(spliced, (sd1, sd2, sd3, sd4, sd5))

    def test_len_signature(self):
        """Signatures of all sizes should give correct length when asked."""
        x1 = np.array([1, 2, 3])
        x2 = np.array([10, 20, 30])
        inputs0 = ()
        inputs1 = x1  # NOT in a tuple
        inputs2 = (x1, x2)
        sig0 = shapes.signature(inputs0)
        sig1 = shapes.signature(inputs1)
        sig2 = shapes.signature(inputs2)
        # pylint: disable=g-generic-assert
        self.assertEqual(len(sig0), 0)
        self.assertEqual(len(sig1), 1)
        self.assertEqual(len(sig2), 2)
        # pylint: enable=g-generic-assert
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    absltest.main()
|
okuta/chainer
|
chainer/testing/parameterized.py
|
Python
|
mit
| 4,970
| 0.000201
|
import functools
import itertools
import types
import unittest
import six
from chainer.testing import _bundle
from chainer import utils
def _param_to_str(obj):
if isinstance(obj, type):
return obj.__name__
return repr(obj)
def _shorten(s, maxlen):
# Shortens the string down to maxlen, by replacing the middle part with
# a 3-dots string '...'.
ellipsis = '...'
if len(s) <= maxlen:
return s
n1 = (maxlen - len(ellipsis)) // 2
n2 = maxlen - len(ellipsis) - n1
s = s[:n1] + ellipsis + s[-n2:]
assert len(s) == maxlen
return s
def _make_class_name(base_class_name, i_param, param):
    """Build a readable class name for one combination of parameters."""
    single_limit = 100   # length cap for a single rendered parameter value
    total_limit = 5000   # length cap for the whole parameters portion
    rendered = ', '.join(
        '{}={}'.format(key, _shorten(_param_to_str(value), single_limit))
        for key, value in param.items())
    rendered = _shorten(rendered, total_limit)
    return '{}_param_{}_{{{}}}'.format(base_class_name, i_param, rendered)
def _parameterize_test_case_generator(base, params):
    # Defines the logic to generate parameterized test case classes.
    # Yields, per parameter dict, a (class name, member dict, method wrapper)
    # triple consumed by _bundle.make_decorator.
    for i, param in enumerate(params):
        cls_name = _make_class_name(base.__name__, i, param)

        def __str__(self):
            # Append the active parameter dict to the base test's string form.
            name = base.__str__(self)
            return '%s parameter: %s' % (name, param)

        mb = {'__str__': __str__}
        for k, v in six.iteritems(param):
            if isinstance(v, types.FunctionType):
                # A function-valued parameter would otherwise become a bound
                # method of the generated class; wrap it so `self` is dropped.
                def create_new_v():
                    f = v  # called immediately below, so v is bound per-iteration
                    def new_v(self, *args, **kwargs):
                        return f(*args, **kwargs)
                    return new_v
                mb[k] = create_new_v()
            else:
                mb[k] = v

        def method_generator(base_method):
            # Generates a wrapped test method
            # Bind to a new variable.
            param2 = param
            @functools.wraps(base_method)
            def new_method(self, *args, **kwargs):
                try:
                    return base_method(self, *args, **kwargs)
                except unittest.SkipTest:
                    # Skips must propagate untouched.
                    raise
                except Exception as e:
                    # Re-raise with the failing parameter set attached so the
                    # report shows which combination broke.
                    s = six.StringIO()
                    s.write('Parameterized test failed.\n\n')
                    s.write('Base test method: {}.{}\n'.format(
                        base.__name__, base_method.__name__))
                    s.write('Test parameters:\n')
                    for k, v in six.iteritems(param2):
                        s.write('  {}: {}\n'.format(k, v))
                    utils._raise_from(e.__class__, s.getvalue(), e)
            return new_method

        yield (cls_name, mb, method_generator)
def parameterize(*params):
    """Class decorator generating one test class per parameter dict.

    TODO(niboshi): Add documentation
    """
    def generate(base):
        return _parameterize_test_case_generator(base, params)
    return _bundle.make_decorator(generate)
def _values_to_dicts(names, values):
    """Turn comma-joined *names* plus a list of value rows into per-case dicts.

    With a single name each row is taken whole; with several names each row
    must be a tuple/list of matching length.
    """
    assert isinstance(names, six.string_types)
    assert isinstance(values, (tuple, list))

    keys = names.split(',')

    def pair_up(ks, vs):
        if len(ks) == 1:
            # One name: the whole row is that name's value.
            return [(ks[0], vs)]
        assert isinstance(vs, (tuple, list)) and len(ks) == len(vs)
        return zip(ks, vs)

    return [dict(pair_up(keys, row)) for row in values]
def from_pytest_parameterize(names, values):
    """Convert pytest-style (names, values) into a list of parameter dicts."""
    # Pytest-style parameterization.
    # TODO(niboshi): Add documentation
    return _values_to_dicts(names, values)
def parameterize_pytest(names, values):
    """Class decorator: parameterize using pytest-style names/values."""
    # Pytest-style parameterization.
    # TODO(niboshi): Add documentation
    return parameterize(*from_pytest_parameterize(names, values))
def product(parameter):
    """Cross-product expansion of parameter specs.

    Accepts either a dict of name(s) -> value rows, or a list of lists of
    dicts; returns a flat list of merged parameter dicts.
    TODO(niboshi): Add documentation
    """
    if isinstance(parameter, dict):
        # Expand each entry into its per-case dicts, then recurse on the
        # resulting list-of-lists. sorted() keeps the ordering deterministic.
        expanded = [
            _values_to_dicts(names, values)
            for names, values in sorted(parameter.items())]
        return product(expanded)

    if isinstance(parameter, list):
        # Validate the list-of-lists-of-dicts shape before combining.
        if any(not isinstance(group, list) for group in parameter):
            raise TypeError('parameter must be list of lists of dicts')
        if any(not isinstance(d, dict) for group in parameter for d in group):
            raise TypeError('parameter must be list of lists of dicts')
        combined = []
        for combo in itertools.product(*parameter):
            merged = {}
            for d in combo:
                merged.update(d)
            combined.append(merged)
        return combined

    raise TypeError(
        'parameter must be either dict or list. Actual: {}'.format(
            type(parameter)))
def product_dict(*parameters):
    """Merge every cross-product combination of the given dict lists.

    TODO(niboshi): Add documentation
    """
    result = []
    for dict_tuple in itertools.product(*parameters):
        merged = {}
        for d in dict_tuple:
            for k, v in six.iteritems(d):
                merged[k] = v
        result.append(merged)
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.