| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 value) | license (15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import logging
from telegram import ParseMode
from telegram.ext import CommandHandler
from . import rollem
commands = ["roll"]
logger = logging.getLogger(__package__)
def register(dp):
dp.add_handler(CommandHandler(commands, roll))
logger.info(f"Registered for commands {commands}")
def roll(update, context):
user = update.message.from_user.name
if not context.args:
request = ""
else:
request = context.args[0]
parsed_request = interpret(request)
try:
result = rollem.roll(parsed_request)
formula = result["visual"]
total = result["total"]
msg = f"{user} rolled {request}:\n{formula}\n= <strong>{total}</strong>"
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
except rollem.InvalidFormatEquationException:
logger.error("Invalid format: " + request)
update.message.reply_text("Invalid format")
def interpret(request):
if not request:
return "4dF"
else:
return request
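# A minimal usage sketch, not from the original module, assuming a
# python-telegram-bot v13-style Updater and a hypothetical BOT_TOKEN
# environment variable; the real bot may bootstrap differently.
import os
from telegram.ext import Updater

updater = Updater(token=os.environ["BOT_TOKEN"], use_context=True)
register(updater.dispatcher)  # wires up the /roll command defined above
updater.start_polling()
updater.idle()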
| xurxodiz/iria | iria/modules/dice/dice.py | Python | mit | 1,022 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# PythonMusicDaemon (pyMD) Server
#
# $Id: $
#
# Copyright (c) 2017 Anna-Sophia Schroeck <annasophia.schroeck at outlook.de>
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "annas"
__date__ = "$05.02.2017 04:39:03$"
import vlc
import os
import sys
import hashlib
import socket
import pyMD
import pyaes
from enum import Enum
if __name__ == "__main__":
def end_callback(event, vlcplayer):
vlcplayer.setend()
def pos_callback(event, vlcplayer):
vlcplayer.m_playerpos = vlcplayer.get_vlc().get_position()
class status(Enum):
INIT = 1
LOAD = 2
PLAY = 3
PAUSED = 4
STOP = 5
class vlcplayer:
MusicList = []
def __init__(self,config):
print("Start vlc system")
self.m_data = config.get_music_path()
self.m_playerpos = 0
self.m_vlc = vlc.Instance("--no-xlib")
self.m_player = self.m_vlc.media_player_new()
self.event_manager =self.m_player.event_manager()
self.event_manager.event_attach(vlc.EventType.MediaPlayerEndReached, end_callback, self)
self.event_manager.event_attach(vlc.EventType.MediaPlayerPositionChanged, pos_callback, self)
self.m_player.audio_set_volume(config.get_music_volume())
self.m_status = status.INIT
print("Create music list")
vlcplayer.MusicList = [f for f in os.listdir(self.m_data) if os.path.isfile(os.path.join(self.m_data, f))]
def setfile(self,name):
file = os.path.join(self.m_data,name)
Media = self.m_vlc.media_new(str(file))
self.m_player.set_media(Media)
self.m_status = status.LOAD
return "load file: " + file
def stream(self, file):
Media = self.m_vlc.media_new(str(file))
self.m_player.set_media(Media)
self.m_status = status.LOAD
return ""
def play(self):
self.m_player.play()
self.m_status = status.PLAY
return "play file"
def get_vlc(self):
return self.m_player
def get_status(self):
return self.m_status
def list_music(self):
for title in vlcplayer.MusicList:
print( title)
def setend(self):
self.m_status = status.STOP
class pymusic:
def __init__(self):
self.m_cfg = pyMD.server_config()
self.m_player = vlcplayer(self.m_cfg)
self.m_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.m_socket.bind((self.m_cfg.get_server_addr(),
self.m_cfg.get_server_port()))
def start(self):
print ("Listening on %s:%d...\n" % (self.m_cfg.get_server_addr(),
self.m_cfg.get_server_port()))
print ("The hash for the client, check the configuration.")
print ("Server and client must have the same hash.")
_hash = self.m_cfg.get_server_hash()
while True:
try:
data, addr = self.m_socket.recvfrom(2048)
crtxt = pyMD.hexToByte(data.decode("utf-8"))
crypt_aes = pyaes.AESModeOfOperationCTR(_hash)
plaintext = crypt_aes.encrypt(crtxt)
args = plaintext.decode("utf-8").split("#")
#com#argument
if "load" in args[0]:
self.load(args[1], addr)
elif "stream" in args[0]:
self.stream(args[1], addr)
elif 'play' in args[0]:
self.play(addr)
elif 'getdb' in args[0]:
i = len(vlcplayer.MusicList)
self.sendinfos(addr, i, vlcplayer.MusicList)
elif 'getpos' in args[0]:
self.getposition(addr)
elif 'getstatus' in args[0]:
self.getstatus(addr)
elif 'help' in args[0]:
self.help(addr)
elif pyMD.CL_HELLO in args[0]:
print("Client connected ")
self.sendinfo(addr, pyMD.PYMDString)
elif pyMD.CL_EXIT in args[0]:
print("Client disconected ")
self.sendinfo(addr, "bye bye")
else:
self.help(addr)
except Exception:
# swallow per-request errors (malformed or unreadable packets) so the
# server loop keeps running
pass
def load(self, cmd, addr):
self.sendinfo(addr, self.m_player.setfile(cmd))
def play(self, addr):
self.sendinfo(addr, self.m_player.play())
def stream(self, link, addr):
self.sendinfo(addr, self.m_player.stream(link))
def getposition(self, addr):
if self.m_player.get_status() == pyMD.status.STOP:
self.sendinfo(addr, str(0))
else:
self.sendinfo(addr, str(self.m_player.m_playerpos))
def getstatus(self, addr):
self.sendinfo(addr, str(self.m_player.get_status()))
def getlenght(self):
pass
def help(self, addr):
msg = ["getdb = get the current files in directory data",
"load#{music title} = load music file from db example: load#music.ogg",
"stream#{http addr}",
"getpos = get current play position",
"play = start the music",
"getstatus = get current status"]
i = len(msg)
self.sendinfos(addr, i, msg)
def sendinfos(self, addr, i, args):
self.cryptsend(str(i), addr)
for arg in args:
self.cryptsend(arg, addr)
def sendinfo(self, addr, info):
self.cryptsend(str(1), addr)
self.cryptsend(info, addr)
def cryptsend(self, plain, addr):
_hash = self.m_cfg.get_server_hash()
crypt_aes = pyaes.AESModeOfOperationCTR(_hash)
ciphertext = pyMD.byteToHex(crypt_aes.encrypt(plain))
self.m_socket.sendto(str.encode(ciphertext), addr)
print (pyMD.PYMDString)
print (pyMD.AUTHOR)
print ()
pm = pymusic()
pm.start()
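# A rough client-side sketch, not from the original file, illustrating the
# wire format the loop above expects: commands are "name#argument" strings,
# AES-CTR encrypted with the shared hash and hex-encoded before being sent
# over UDP. The key, host and port below are placeholders; a real client
# should read them from the same configuration the server uses.
client_hash = b"0123456789abcdef"  # placeholder 16-byte shared key
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
payload = pyaes.AESModeOfOperationCTR(client_hash).encrypt("play#")
client_sock.sendto(str.encode(pyMD.byteToHex(payload)), ("127.0.0.1", 5005))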
| RoseLeBlood/pyMD | pyMD-Server.py | Python | mit | 7,762 |
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import Provider, ProviderAccount
class GitHubAccount(ProviderAccount):
pass
class GitHubProvider(Provider):
id = 'github'
name = 'GitHub'
package = 'allauth.socialaccount.providers.github'
account_class = GitHubAccount
providers.registry.register(GitHubProvider)
| uroslates/django-allauth | allauth/socialaccount/providers/github/models.py | Python | mit | 373 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2015, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from unittest import TestCase
import time
from multiprocessing import Process
from clay.factory import MessageFactory
from clay.serializer import AvroSerializer
from clay.messenger import MQTTMessenger, MQTTReceiver
from clay.exceptions import MessengerErrorNoQueue, MessengerErrorConnectionRefused
from tests import TEST_CATALOG, RABBIT_QUEUE, RABBIT_EXCHANGE
class TestMQTT(TestCase):
def setUp(self):
self.avro_factory = MessageFactory(AvroSerializer, TEST_CATALOG)
self.avro_message = self.avro_factory.create('TEST')
self.avro_message.id = 1111111
self.avro_message.name = "aaa"
self.avro_encoded = '\x00\x10\x8e\xd1\x87\x01\x06aaa'
self.complex_avro_message = self.avro_factory.create('TEST_COMPLEX')
self.complex_avro_message.id = 1111111
self.complex_avro_message.name = "aaa"
self.complex_avro_message.array_complex_field.add()
self.complex_avro_message.array_complex_field[0].field_1 = "bbb"
self.complex_avro_message.array_simple_field.add()
self.complex_avro_message.array_simple_field[0] = "ccc"
self.complex_avro_encoded = '\x02(\x8e\xd1\x87\x01\x06aaa\x02\x06bbb\x00\x02\x06ccc\x00'
def tearDown(self):
self._reset()
def _reset(self):
pass
def test_mqtt_transaction_no_response(self):
def handler(message_body, message_type):
self.assertEqual(message_body, self.avro_encoded)
self.assertEqual(message_type, self.avro_message.message_type)
broker = MQTTReceiver()
broker.application_name = RABBIT_EXCHANGE
broker.set_queue(RABBIT_QUEUE, False, False)
broker.handler = handler
p = Process(target=broker.run)
p.start()
time.sleep(1)
messenger = MQTTMessenger()
messenger.application_name = RABBIT_EXCHANGE
messenger.add_queue(RABBIT_QUEUE, False, False)
result = messenger.send(self.avro_message)
self.assertEqual(result, None)
p.terminate()
p.join()
def test_mqtt_producer_server_down(self):
messenger = MQTTMessenger('localhost', 20000) # non existent rabbit server
messenger.application_name = RABBIT_EXCHANGE
messenger.add_queue(RABBIT_QUEUE, False, False)
result = messenger.send(self.avro_message)
self.assertIsNone(result)
self.assertEqual(messenger._spooling_queue.qsize(), 1)
def test_mqtt_producer_non_existent_queue(self):
self._reset()
messenger = MQTTMessenger()
# the queue has not been specified yet
with self.assertRaises(MessengerErrorNoQueue):
messenger.send(self.avro_message)
messenger.application_name = RABBIT_EXCHANGE
messenger.add_queue(RABBIT_QUEUE, False, False)
result = messenger.send(self.avro_message)
self.assertIsNone(result)
self.assertEqual(messenger._spooling_queue.qsize(), 0)
def test_mqtt_broker_server_down(self):
def handler(message_body, message_type):
self.assertEqual(message_body, self.avro_encoded)
self.assertEqual(message_type, self.avro_message.message_type)
broker = MQTTReceiver('localhost', 20000) # non existent rabbit server
broker.handler = handler
broker.application_name = RABBIT_EXCHANGE
broker.set_queue(RABBIT_QUEUE, False, True)
with self.assertRaises(MessengerErrorConnectionRefused):
broker.run()
| crs4/clay | tests/test_mqtt.py | Python | mit | 4,621 |
# -*- coding: utf-8 -*-
import os
import uuid
import signal
import docker
import tarfile
import checksumdir
from contextlib import contextmanager
from logzero import logger
from config import DEFAULT_LIMITS, CPU_TO_REAL_TIME_FACTOR, DEFAULT_GENERATE_FILE_SIZE, TEMP_DIR, WORKING_DIR
from exceptions import CrazyBoxError, DockerError
from docker.models.containers import Container
from requests.packages import urllib3
from docker.errors import APIError, DockerException, NotFound, ImageNotFound
from requests.exceptions import RequestException, ReadTimeout
from dateutil.parser import parse
# docker lower level functions
try:
client = docker.from_env()
api_client = docker.APIClient()
except Exception as e:
logger.exception(e)
raise CrazyBoxError(e)
def is_killed_by_sigkill_or_sigxcpu(status):
return status - 128 in [signal.SIGKILL, signal.SIGXCPU]
def merge_limits(limits=None):
if not limits:
return DEFAULT_LIMITS
is_real_time_specified = 'real_time' in limits
for limit_name, default_value in DEFAULT_LIMITS.items():
if limit_name not in limits:
limits[limit_name] = default_value
if not is_real_time_specified:
limits['real_time'] = limits['cpu_time'] * CPU_TO_REAL_TIME_FACTOR
return limits
def generate_ulimits(limits):
ulimits = []
cpu_time = int(limits['cpu_time'])
ulimits.append({'name': 'cpu', 'soft': cpu_time, 'hard': cpu_time})
if 'file_size' in limits:
fsize = limits['file_size']
else:
fsize = DEFAULT_GENERATE_FILE_SIZE
# fsize: 1 block ?= 1KB
# update: we can view block size using command: "stat -f ."
# in my laptop, 1 block = 4KB
# we can view all parameter by "cat /etc/security/limits.conf"
# introduction: http://blog.csdn.net/lenk2010/article/details/21158373
# TODO: test file size limit
# 1. 10 * 1024 * 1024 =? 10MB output file size ?
ulimits.append({'name': 'fsize', 'soft': fsize, 'hard': fsize})
return ulimits
def generate_args(time_limit, memory_limit, file_size_limit):
limits = merge_limits({'cpu_time': time_limit, 'memory': memory_limit})
limits.update({'file_size': file_size_limit})
ulimits = generate_ulimits(limits)
return limits['real_time'], str(int(memory_limit)) + 'm', ulimits
def generate_volumes(volume_name, data_dir=None):
if data_dir:
return {volume_name: {'bind': WORKING_DIR, 'mode': 'rw'},
data_dir: {'bind': '/data/', 'mode': 'ro'}}
else:
return {volume_name: {'bind': WORKING_DIR, 'mode': 'rw'}}
def inspect_container_state(container: Container):
try:
container_info = api_client.inspect_container(container)
except (RequestException, DockerException) as e:
raise DockerError(str(e))
started_at = parse(container_info['State']['StartedAt'])
finished_at = parse(container_info['State']['FinishedAt'])
duration = finished_at - started_at
duration_seconds = duration.total_seconds()
if duration_seconds < 0:
duration_seconds = -1
return {
'duration': duration_seconds,
'oom_killed': container_info['State'].get('OOMKilled', False),
}
def get_container_output(container: Container):
try:
stdout = container.logs(stdout=True, stderr=False)
stderr = container.logs(stdout=False, stderr=True)
except (RequestException, DockerException):
return b'', b''
return stdout, stderr
# docker container upper functions
def create_container(container_name, command, volume_name,
time_limit, memory_limit=512 * 1024 * 1024, file_size_limit=10 * 1024 * 1024, data_dir=None):
real_time_limit, memory, ulimits = generate_args(time_limit, memory_limit, file_size_limit)
# logger.debug("container limit: %sS %s", real_time_limit, memory.upper())
volumes = generate_volumes(volume_name, data_dir)
try:
crazybox = client.containers.create(image='crazybox:latest',
name=container_name,
command=command,
mem_limit=memory, memswap_limit=memory,
ulimits=ulimits,
working_dir=WORKING_DIR,
network_disabled=True,
volumes=volumes,
detach=True)
except ImageNotFound:
logger.exception("No image found: [crazybox:latest]")
raise CrazyBoxError("No image found: [crazybox:latest]")
return crazybox, real_time_limit
def run_container(container: Container, real_time):
container.start()
timeout = False
exit_code = None
try:
exit_code = container.wait(timeout=real_time)
except ReadTimeout:
timeout = True
except (RequestException, DockerException) as ex:
if isinstance(ex, RequestException):
wrapped_exc = ex.args[0]
if isinstance(wrapped_exc, urllib3.exceptions.ReadTimeoutError):
timeout = True
if not timeout:
raise DockerError(str(ex))
result = {
'exit_code': exit_code,
'stdout': b'',
'stderr': b'',
'duration': None, # s
'timeout': timeout,
'oom_killed': False,
}
if exit_code is not None:
result['stdout'], result['stderr'] = get_container_output(container)
state = inspect_container_state(container.id)
result.update(state)
if is_killed_by_sigkill_or_sigxcpu(exit_code) and not state['oom_killed']:
# SIGKILL/SIGXCPU is sent but not by out of memory killer
result['timeout'] = True
return result
# other functions
@contextmanager
def working_volume():
volume_name = 'crazybox-' + str(uuid.uuid4())
logger.info("Creating new docker volume for working directory")
try:
try:
client.volumes.create(name=volume_name)
except APIError as e:
logger.exception("Failed to create a docker volume")
raise DockerError(str(e))
logger.info("New docker volume is created: %s", volume_name)
yield volume_name
finally:
logger.info("Removing the docker volume: %s", volume_name)
try:
client.volumes.get(volume_name).remove(force=True)
except NotFound:
logger.warning("Failed to remove the docker volume, it doesn't exist")
except APIError:
logger.exception("Failed to remove the docker volume, try prune unused container first: ")
ret = client.containers.prune()
logger.info("SpaceReclaimed: %s, ContainersDeleted: %s", ret['SpaceReclaimed'], ret['ContainersDeleted'])
ret = client.volumes.prune()
logger.info("SpaceReclaimed: %s, VolumesDeleted: %s", ret['SpaceReclaimed'], ret['VolumesDeleted'])
else:
logger.info("Docker volume removed")
@contextmanager
def compress_code(src_code, file_name_suffix, name=None):
if not name:
name = str(uuid.uuid4())
file_path = os.path.join(TEMP_DIR, name + file_name_suffix)
tar_file_path = os.path.join(TEMP_DIR, name + '.tar')
try:
with open(file_path, 'w') as file:
file.write(src_code)
with tarfile.open(tar_file_path, 'w') as file:
file.add(file_path, arcname=name + file_name_suffix)
logger.info("New temporary file is created: %s", name)
yield name
finally:
try:
os.remove(file_path)
os.remove(tar_file_path)
except Exception as ex:
logger.exception("Failed to remove the temporary file: %s", str(ex))
else:
logger.info("temporary file removed")
@contextmanager
def extract_tar(tar_path):
out_file_path = None
try:
if os.path.exists(tar_path):
tar = tarfile.open(tar_path)
file_name = tar.getnames()[0]
tar.extract(file_name, TEMP_DIR)
out_file_path = os.path.join(TEMP_DIR, file_name)
yield out_file_path
finally:
if os.path.exists(tar_path):
os.remove(tar_path)
if out_file_path and os.path.exists(out_file_path):
os.remove(out_file_path)
def replace_arg(command, src_path, exe_path, max_memory=None):
try:
command = command.format(src_path=src_path, exe_path=exe_path)
except Exception as ex:
logger.debug("replace command[%s] with src path[%s]: %s", command, src_path, ex)
try:
command = command.format(src_path=src_path)
except Exception as ex:
logger.debug("replace command[%s] with exe path[%s]: %s", command, src_path, ex)
if max_memory:
try:
command = command.format(max_memory=max_memory)
except Exception as ex:
logger.debug("replace command[%s] with max memory[%s]: %s", command, src_path, ex)
return command
def get_dir_hash(directory):
if os.path.exists(directory):
return checksumdir.dirhash(directory, 'sha256')
else:
return -1
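# A hedged end-to-end sketch, not from the original module, of how the
# helpers above can be combined: create a scratch volume, build a container
# around a command, run it under the limits and collect the result. It
# assumes the crazybox:latest image exists and the config constants imported
# at the top are set.
def _demo_run(command="echo hello", time_limit=2):
    with working_volume() as volume_name:
        container, real_time_limit = create_container(
            container_name="demo-" + volume_name,
            command=command,
            volume_name=volume_name,
            time_limit=time_limit)
        try:
            result = run_container(container, real_time_limit)
        finally:
            container.remove(force=True)  # drop the finished container
    return result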
| USTB-LETTers/judger | utils.py | Python | mit | 9,209 |
import os
import random
import numpy as np
from scipy.misc import imresize, imread
from scipy.ndimage import zoom
from collections import defaultdict
DATA_MEAN = np.array([[[123.68, 116.779, 103.939]]])
def preprocess_img(img, input_shape):
img = imresize(img, input_shape)
img = img - DATA_MEAN
img = img[:, :, ::-1]
img = img.astype('float32')
return img
def update_inputs(batch_size = None, input_size = None, num_classes = None):
return np.zeros([batch_size, input_size[0], input_size[1], 3]), \
np.zeros([batch_size, input_size[0], input_size[1], num_classes])
def data_generator_s31(datadir='', nb_classes = None, batch_size = None, input_size=None, separator='_', test_nmb=50):
if not os.path.exists(datadir):
print("ERROR!The folder is not exist")
#listdir = os.listdir(datadir)
data = defaultdict(dict)
image_dir = os.path.join(datadir, "imgs")
image_paths = os.listdir(image_dir)
for image_path in image_paths:
nmb = image_path.split(separator)[0]
data[nmb]['image'] = image_path
anno_dir = os.path.join(datadir, "maps_bordered")
anno_paths = os.listdir(anno_dir)
for anno_path in anno_paths:
nmb = anno_path.split(separator)[0]
data[nmb]['anno'] = anno_path
values = list(data.values())
random.shuffle(values)
return generate(values[test_nmb:], nb_classes, batch_size, input_size, image_dir, anno_dir), \
generate(values[:test_nmb], nb_classes, batch_size, input_size, image_dir, anno_dir)
def generate(values, nb_classes, batch_size, input_size, image_dir, anno_dir):
while 1:
random.shuffle(values)
images, labels = update_inputs(batch_size=batch_size,
input_size=input_size, num_classes=nb_classes)
for i, d in enumerate(values):
img = imresize(imread(os.path.join(image_dir, d['image']), mode='RGB'), input_size)
y = imread(os.path.join(anno_dir, d['anno']), mode='L')
h, w = input_size
y = zoom(y, (1.*h/y.shape[0], 1.*w/y.shape[1]), order=1, prefilter=False)
y = (np.arange(nb_classes) == y[:,:,None]).astype('float32')
assert y.shape[2] == nb_classes
images[i % batch_size] = img
labels[i % batch_size] = y
if (i + 1) % batch_size == 0:
yield images, labels
images, labels = update_inputs(batch_size=batch_size,
input_size=input_size, num_classes=nb_classes)
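# A minimal consumption sketch, not from the original file, assuming a
# hypothetical dataset directory laid out as <datadir>/imgs and
# <datadir>/maps_bordered with files named "<id>_...". The 473x473 input
# size is only an example.
train_gen, val_gen = data_generator_s31(
    datadir='/path/to/dataset', nb_classes=2, batch_size=4,
    input_size=(473, 473), separator='_', test_nmb=50)
images, labels = next(train_gen)  # arrays of shape (4, 473, 473, 3) and (4, 473, 473, 2)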
| Vladkryvoruchko/PSPNet-Keras-tensorflow | utils/preprocessing.py | Python | mit | 2,350 |
# -*- coding: utf-8 -*-
"""
Common templates tags for porticus
"""
from six import string_types
from django.conf import settings
from django import template
from django.utils.safestring import mark_safe
from django.shortcuts import get_object_or_404
from porticus.models import Gallery, Album
register = template.Library()
class GalleryList(template.Node):
"""
Gallery list (without any pagination) as a HTML fragment
"""
def __init__(self, template_varname=None, instance_varname=None):
"""
:type insert_instance_varname: string or object ``django.db.models.Model``
:param insert_instance_varname: Instance variable name or a string slug for
the current gallery
:type template_varname: string
:param template_varname: (optional) ...
"""
self.template_varname = None
if template_varname:
self.template_varname = template.Variable(template_varname)
self.instance_varname = None
if instance_varname:
self.instance_varname = template.Variable(instance_varname)
def render(self, context):
"""
:type context: object ``django.template.Context``
:param context: Context tag object
:rtype: string
:return: the HTML for the list
"""
# Default assume this is directly an instance
current_gallery = None
if self.instance_varname:
current_gallery = self.instance_varname.resolve(context)
if current_gallery:
# Assume this is slug
if isinstance(current_gallery, string_types):
current_gallery = Gallery.objects.get(slug=current_gallery, publish=True)
# Only accept Gallery model instance
elif not isinstance(current_gallery, Gallery):
raise template.TemplateSyntaxError("You can only specify a Gallery instance or a slug")
# Resolve optional template path
template_path = settings.PORTICUS_GALLERIES_TEMPLATE_FRAGMENT
if self.template_varname:
try:
resolved_var = self.template_varname.resolve(context)
except template.VariableDoesNotExist:
pass
else:
if resolved_var:
template_path = resolved_var
gallery_list = Gallery.publish.all()
subcontext = {
'object_list': gallery_list,
'current_gallery': current_gallery,
}
html = template.loader.get_template(template_path).render(template.Context(subcontext))
return mark_safe(html)
@register.tag(name="porticus_gallery_list")
def do_porticus_gallery_list(parser, token):
"""
Display the gallery list
Usage : ::
{% porticus_gallery_list [Optional template path] [Optional Gallery slug or instance] %}
"""
args = token.split_contents()
return GalleryList(*args[1:])
do_porticus_gallery_list.is_safe = True
class AlbumFragment(template.Node):
"""
Album ressources as a HTML fragment
"""
def __init__(self, instance_varname, template_varname=None):
"""
:type insert_instance_varname: string or object ``django.db.models.Model``
:param insert_instance_varname: Instance variable name or a string slug
:type template_varname: string
:param template_varname: (optional) ...
"""
self.instance_varname = template.Variable(instance_varname)
self.template_varname = None
if template_varname:
self.template_varname = template.Variable(template_varname)
def render(self, context):
"""
:type context: object ``django.template.Context``
:param context: Context tag object
:rtype: string
:return: the HTML for the album
"""
# Default assume this is directly an instance
album_instance = self.instance_varname.resolve(context)
# Assume this is slug
if isinstance(album_instance, string_types):
album_instance = Album.objects.get(slug=album_instance, publish=True)
# Get the album's ressources
ressources_list = album_instance.ressource_set.filter(publish=True)
# Resolve optional template path
template_path = settings.PORTICUS_ALBUM_TEMPLATE_FRAGMENT
if self.template_varname:
try:
template_path = self.template_varname.resolve(context)
except template.VariableDoesNotExist:
pass
subcontext = {
'gallery_object': album_instance.gallery,
'album_object': album_instance,
'ressource_list': ressources_list,
}
html = template.loader.get_template(template_path).render(template.Context(subcontext))
return mark_safe(html)
@register.tag(name="porticus_album_fragment")
def do_porticus_album_fragment(parser, token):
"""
Display an album
Usage : ::
{% porticus_album_fragment [Optional Album slug or instance] [Optional template path] %}
"""
args = token.split_contents()
if len(args) < 2:
raise template.TemplateSyntaxError("You need to specify at less an \"Album\" instance or slug")
else:
return AlbumFragment(*args[1:])
do_porticus_album_fragment.is_safe = True
def porticus_album_list(gallery_instance):
"""
Return a queryset list from a Gallery, this is a flat list of albums, not recursive
Usage : ::
{% porticus_album_list [Gallery slug or instance] as gallery_albums %}
"""
if isinstance(gallery_instance, string_types):
return get_object_or_404(Gallery, slug=gallery_instance, publish=True).album_set.filter(level=0)
return gallery_instance.album_set.filter(level=0)
register.assignment_tag(porticus_album_list)
#@register.filter(name='embed')
#def embed(value):
#return value.replace('watch?v=', 'embed/')
| emencia/porticus | porticus/templatetags/porticus_tags.py | Python | mit | 6,013 |
"""Authentication process"""
import cherrypy
import lib.db_users as db_users
def check_auth(required=True, user_role=None):
"""Check authentication"""
if required:
user = db_users.user_by_session(cherrypy.session.id)
if user == None:
cherrypy.lib.sessions.expire()
raise cherrypy.HTTPRedirect("/login")
elif user_role != None and user['role'] != user_role:
raise cherrypy.HTTPRedirect("/login")
def check_rights():
"""Check if the user has the right to visit a page"""
try:
temp = cherrypy.request.path_info.split("/")[2]
except IndexError:
raise cherrypy.HTTPRedirect("/")
view_user = db_users.user_by_name(temp)
if view_user == None:
raise cherrypy.HTTPError(404, "Profile not found")
elif view_user['privacy'] == 'private':
user = db_users.user_by_session(cherrypy.session.id)
if user == None or user['username'] != view_user['username']:
raise cherrypy.HTTPError(404, "Profile not public")
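# A hedged usage sketch, not from the original module: a CherryPy page
# handler might call check_auth before rendering. The class and page below
# are made up for illustration.
class SettingsPage(object):
    @cherrypy.expose
    def index(self):
        check_auth(required=True, user_role='admin')
        return 'admin settings'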
| JB26/Bibthek | lib/auth.py | Python | mit | 1,040 |
# -*- coding: utf-8 -*-
""" Utility methods for motif
"""
import csv
from mir_eval import melody
import numpy as np
import os
def validate_contours(index, times, freqs, salience):
'''Check that contour input is well formed.
Parameters
----------
index : np.array
Array of contour numbers
times : np.array
Array of contour times
freqs : np.array
Array of contour frequencies
salience : np.array
Array of contour saliences
'''
N = len(index)
if any([len(times) != N, len(freqs) != N, len(salience) != N]):
raise ValueError(
"the arrays index, times, freqs, and salience "
"must be the same length."
)
def format_contour_data(frequencies):
""" Convert contour frequencies to cents + voicing.
Parameters
----------
frequencies : np.array
Contour frequency values
Returns
-------
est_cent : np.array
Contour frequencies in cents
est_voicing : np.array
Contour voicings
"""
est_freqs, est_voicing = melody.freq_to_voicing(frequencies)
est_cents = melody.hz2cents(est_freqs)
return est_cents, est_voicing
def format_annotation(new_times, annot_times, annot_freqs):
""" Format an annotation file and resample to a uniform timebase.
Parameters
----------
new_times : np.array
Times to resample to
annot_times : np.array
Annotation time stamps
annot_freqs : np.array
Annotation frequency values
Returns
-------
ref_cent : np.array
Annotation frequencies in cents at the new timescale
ref_voicing : np.array
Annotation voicings at the new timescale
"""
ref_freq, ref_voicing = melody.freq_to_voicing(annot_freqs)
ref_cent = melody.hz2cents(ref_freq)
ref_cent, ref_voicing = melody.resample_melody_series(
annot_times, ref_cent, ref_voicing, new_times,
kind='linear'
)
return ref_cent, ref_voicing
def get_snippet_idx(snippet, full_array):
""" Find the indices of ``full_array`` where ``snippet`` is present.
Assumes both ``snippet`` and ``full_array`` are ordered.
Parameters
----------
snippet : np.array
Array of ordered time stamps
full_array : np.array
Array of ordered time stamps
Returns
-------
idx : np.array
Array of booleans indicating where in ``full_array`` ``snippet``
is present.
"""
idx = np.logical_and(
full_array >= snippet[0], full_array <= snippet[-1]
)
return idx
def load_annotation(annotation_fpath, n_freqs=1, to_array=True,
rm_zeros=False, delimiter=','):
""" Load an annotation from a csv file.
Parameters
----------
annotation_fpath : str
Path to annotation file.
n_freqs : int or None
Number of frequencies to read, or None to use max
to_array : bool
If True, returns annot_freqs as a numpy array
If False, returns annot_freqs as a list of lists.
Returns
-------
annot_times : array
Annotation time stamps
annot_freqs : array
Annotation frequency values
"""
end_idx = None if n_freqs is None else n_freqs + 1
if not os.path.exists(annotation_fpath):
raise IOError("The annotation path {} does not exist.")
annot_times = []
annot_freqs = []
with open(annotation_fpath, 'r') as fhandle:
reader = csv.reader(fhandle, delimiter=',')
for row in reader:
annot_times.append(row[0])
if rm_zeros:
temp_freqs = [r for r in row[1:end_idx] if float(r) > 0]
else:
temp_freqs = [r for r in row[1:end_idx]]
annot_freqs.append(temp_freqs)
annot_times = np.array(annot_times, dtype=float)
annot_freqs = [np.array(f).astype(float) for f in annot_freqs]
if to_array:
annot_freqs = np.array(annot_freqs, dtype=float).flatten()
return annot_times, annot_freqs
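# A short usage sketch, not from the original module: load a single-f0
# annotation from a hypothetical CSV and resample it onto a uniform 10 ms
# timebase with the helpers above.
annot_times, annot_freqs = load_annotation('annotation.csv', n_freqs=1)
new_times = np.arange(0, annot_times[-1], 0.01)
ref_cent, ref_voicing = format_annotation(new_times, annot_times, annot_freqs)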
| rabitt/motif | motif/utils.py | Python | mit | 4,174 |
#!/usr/bin/env python
# This setup relies on setuptools since distutils is insufficient and
# badly hacked code
from setuptools import setup, find_packages
version = '0.0.1'
author = 'David-Leon Pohl'
author_email = 'david-leon.pohl@rub.de'
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='hass_scripts',
version=version,
description='My collection of home assistant scripts.',
url='https://github.com/DavidLP/hass_scripts',
license='MIT License',
author=author,
maintainer=author,
author_email=author_email,
maintainer_email=author_email,
packages=find_packages(),
install_requires=required,
# accept all data files and directories matched by MANIFEST.in or found in
# source control
include_package_data=True,
package_data={
'': ['README.*', 'VERSION'], 'docs': ['*'], 'examples': ['*']},
platforms='linux',
entry_points={
'console_scripts': [
'hass_scripts = hass_scripts.__main__:main'
]
},
)
| DavidLP/hass_scripts | setup.py | Python | mit | 1,045 |
#!/usr/bin/env python3
from setuptools import find_packages,setup
VERSION = '0.1.0.dev1'
install_requires = [
'beautifulsoup4>=4.4.1',
'requests>=2.4.3',
]
setup(
name="Library API",
version=VERSION,
description="Privides access to various libraries (book borrowing places, not code libraries).",
author=', '.join((
'Benjamin Howe <ben@bh96.uk>',
'Scott Street <scott@spru.sr>',
)),
url="https://github.com/BenjaminEHowe/library-api",
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=install_requires,
license="MIT",
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| BenjaminEHowe/library-api | setup.py | Python | mit | 830 |
import cv2
import numpy as np
HESSIAN_THRESHOLD = 400
FLANN_INDEX_KDTREE = 0
def test_surfmatch(ref_img, query_img, num_kp=None, min_match=10):
'''
Tests the SURF matcher by finding an homography and counting the
percentage of inliers in said homography.
Parameters
----------
ref_img: ndarray
Reference image, must have just one channel (greyscale).
query_img: ndarray
Query image, must have just one channel (greyscale).
num_kp: int or None
Number of keypoints to extract, None if all that are found.
min_match: int
Minimum number of point matches to be considered as valid for
constructing the homography.
Returns
-------
inliers: float
The ratio between the number of point matches inside the homography and
the total number of matches.
num_kp_found: tuple (int, int)
Number of keypoints found in both images.
'''
surf = cv2.xfeatures2d.SURF_create(HESSIAN_THRESHOLD)
ref_kp, ref_desc = surf.detectAndCompute(ref_img, None)
query_kp, query_desc = surf.detectAndCompute(query_img, None)
ref_kp = ref_kp[:num_kp]
ref_desc = ref_desc[:num_kp]
query_kp = query_kp[:num_kp]
query_desc = query_desc[:num_kp]
num_kp_found = (len(ref_kp), len(query_kp))
index_params = {'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}
search_params = {'checks': 50}
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(ref_desc, query_desc, k=2)
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
if len(good) >= min_match:
src_pts = np.array([ref_kp[m.queryIdx].pt for m in good])
dst_pts = np.array([query_kp[m.trainIdx].pt for m in good])
_, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
mask = mask.ravel()
else:
raise ValueError('Not enough matches were found')
inliers = np.sum(mask) / len(good)
return inliers, num_kp_found
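# A hedged usage sketch, not from the original test: compare two grayscale
# images with the matcher above. The file names are placeholders.
ref = cv2.imread('reference.png', cv2.IMREAD_GRAYSCALE)
query = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)
inlier_ratio, (n_ref_kp, n_query_kp) = test_surfmatch(ref, query, num_kp=500)
print('inlier ratio: %.2f (%d vs %d keypoints)' % (inlier_ratio, n_ref_kp, n_query_kp))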
| sebasvega95/feature-matching | tests/surf_test.py | Python | mit | 2,042 |
from movie.models import Movie
import urllib as urllib
import urllib2,json
import os,glob
from django.template import defaultfilters
import unicodedata
author_list = []
BASE='http://api.rottentomatoes.com/api/public/v1.0/'
KEY='mz7z7f9zm79tc3hcaw3xb85w'
movieURL=BASE+'movies.json'
def main():
print('starting.')
checkStrSet = set([])
for movie in Movie.objects.order_by("-theater_date"):
movieName = unicodedata.normalize('NFKD', movie.title).encode('ascii', 'ignore')
movieSearchURL=movieURL+'?'+urllib.urlencode({'apikey':KEY, 'q': movieName})
movieData = json.loads(urllib2.urlopen(movieSearchURL).read())
movieData = movieData['movies']
#We need to find the right movie now, because we don't want to just take the 1st result
# Filter by year.
correctMovieID=-1
for movietomatoe in movieData:
if int(movietomatoe['alternate_ids']['imdb']) == movie.imdb_id:
correctMovieID=movietomatoe['id']
break
if correctMovieID==-1:
raise Exception('Error: Cannot find movie with that year and name.')
print('Getting reviews for movie '+movie.title)
singlemoviereviews = getAuthors(movie,correctMovieID)
for review in singlemoviereviews:
checkStr = review['author'][0:5]+review['link'][7:14]
print('checkStr is '+ checkStr)
if not checkStr in checkStrSet:
author_list.append(review)
print('Successfully added author set for '+movie.title)
checkStrSet.add(checkStr)
def getAuthors(movie,movieid):
print('Get authors from'+movie.title)
title = movie.title
imdb_id = movieid
reviewSearchURL=BASE+'movies/'+str(imdb_id)+'/reviews.json?'
reviewSearchURL = reviewSearchURL+urllib.urlencode({'apikey':KEY})
print('Search url is ' + reviewSearchURL)
reviewData=json.loads(urllib2.urlopen(reviewSearchURL).read())
reviewData = reviewData['reviews']
print('Printing reviewData')
print(reviewData)
returnSet=[]
for review in reviewData:
returnSet.append({'author':review['critic'],
'link':review['links']['review']})
return returnSet
#tempset now an array of dictionaries that contain
#author and url
#Check whether author and url combination exists
#If found duplicate, set addMovie to false
#We want to have one url for each unique author
def downloadJSON(movie,directory,movieid):
print('...Grabbing JSONs for ' + movie.title)
title = movie.title
imdb_id = movieid
reviewSearchURL=BASE+'movies/'+str(imdb_id)+'/reviews.json?'
reviewSearchURL = reviewSearchURL+urllib.urlencode({'apikey':KEY})
#print('Search url is ' + reviewSearchURL)
reviewData=json.loads(urllib2.urlopen(reviewSearchURL).read())
reviewData = reviewData['reviews']
if len(reviewData) > 0:
with open(directory+'/'+str(movie.imdb_id),'wb') as fp:
json.dump(reviewData,fp)
print('...Successfully dumped JSON of ' + movie.title)
else:
print('...No reviews available for movie ' + movie.title)
print('...Skipping write')
#f = open(directory+'/'+str(imdb_id), 'w')
#f.write(reviewData)
#f.close()
#Indexes 0 to # movies - 1
def downloadJSONRange(directory,start,end):
if os.path.exists(directory) == False:
try:
os.mkdir(directory)
print('Directory created: ' + directory)
except Exception as err:
print('Error creating dir')
exit(2)
#print('destination directory is ' + directory)
for movie in Movie.objects.order_by("-theater_date")[start:end]:
print('Starting download of ' + movie.title)
decodedTitle = unicodedata.normalize('NFKD',movie.title).encode('ascii','ignore')
movieSearchURL=movieURL+'?'+urllib.urlencode({'apikey':KEY, 'q': decodedTitle})
movieData = json.loads(urllib2.urlopen(movieSearchURL).read())
movieData = movieData['movies']
#We need to find the right movie now, because we don't want to just take the 1st result
# Filter by year.
correctMovieID=-1
matchByYear = False
for movietomatoe in movieData:
try:
#print('tomatoes id get '+ movietomatoe['alternate_ids']['imdb'])
#print('database id get ' + str(movie.imdb_id))
if int(movietomatoe['alternate_ids']['imdb']) == movie.imdb_id:
correctMovieID=movietomatoe['id']
break
except KeyError:
print('...No imdb found, match by year')
matchByYear = True
except:
print('...invalid imdb id found')
if matchByYear == True:
try:
for movietomatoe in movieData:
movieYear = defaultfilters.date(movie.theater_date,'Y')
#print('tomatoes date get ' + str(movietomatoe['year']))
#print('database date get ' + movieYear)
if movietomatoe['year'] == int(movieYear):
correctMovieID = movietomatoe['id']
break
except:
print('...No year value set, skipping.')
if correctMovieID==-1:
print('...Skipping: Cannot find movie with that year ' + defaultfilters.date(movie.theater_date,'Y') + 'and name ' + movie.title)
print('...Skipping write.')
elif correctMovieID != -1:
downloadJSON(movie,directory,correctMovieID)
print('...Finished processing movie '+ movie.title)
def extractAuthors(directory,outdir,outname):
os.chdir(directory)
f = open(outdir + '/' + outname,'w')
print('listing current files')
for filename in os.listdir('.'):
print(filename)
for files in os.listdir('.'):
print('Current file is ' + files)
listofreviews = json.loads(open(files,'r').read())
for review in listofreviews:
try:
authorName = review['critic']
print('authorname is: '+authorName)
except KeyError:
print('cannot find author name')
authorName = ' '
try:
link = review['links']['review']
print('link is' + link)
except KeyError:
print('cannot find link')
link = ' '
tempstr = authorName + ' : ' + link
f.write(tempstr)
f.write('\n\n')
f.close()
| sameenjalal/mavenize-beta | mavenize/lib/db/DownloadAuthors.py | Python | mit | 6,573 |
from dolfin import has_lu_solver_method
lusolver = "superlu_dist" if has_lu_solver_method("superlu_dist") else "default"
direct = dict(
reuse = False,
iterative = False,
lusolver = lusolver,
)
direct_reuse = dict(
reuse = True,
iterative = False,
lusolver = lusolver,
luparams = dict(
symmetric = False,
same_nonzero_pattern = True,
reuse_factorization = True,),
)
bicgstab = dict(
reuse = False,
iterative = True,
lusolver = ("superlu_dist" if has_lu_solver_method("superlu_dist") else "default"),
luparams = dict(
symmetric = False,
same_nonzero_pattern = True,
reuse_factorization = False,),
ks = "bicgstab",
kp = "hypre_euclid",
kparams = dict(
maximum_iterations = 600,
monitor_convergence = False,
nonzero_initial_guess = True,
error_on_nonconvergence = True, #False,
absolute_tolerance = 1e-5,
relative_tolerance = 1e-8,
preconditioner = dict(
ilu = dict(fill_level = 1)))
)
poisson = dict(
reuse = True,
iterative = True,
lusolver = ("superlu_dist" if has_lu_solver_method("superlu_dist") else "default"),
luparams = dict(
symmetric = True,
same_nonzero_pattern = True,
reuse_factorization = True,),
ks = "cg",
kp = "hypre_euclid",
kparams = dict(
maximum_iterations = 500,
monitor_convergence = False,
relative_tolerance = 1e-6,
preconditioner = dict(
ilu = dict(fill_level = 1)))
)
stokes = dict(
#reuse = False, # DEBUG
reuse = True,
iterative = False,
lusolver = ("superlu" if has_lu_solver_method("superlu") else "default"),
luparams = dict(
symmetric = True,
same_nonzero_pattern = True,
reuse_factorization = True,),
ks = "tfqmr",
kp = "hypre_euclid",
fieldsplit = False, #True,
kparams = dict(
maximum_iterations = 1000,
monitor_convergence = False,
# large rel.tol. together with nonzero initial guess = bad idea!!!
relative_tolerance = 1e-5,
# absolute tolerance must not be too large compared with newton tol
# (but also not too low since that would be inefficient)
absolute_tolerance = 1e-5,
nonzero_initial_guess = True,
error_on_nonconvergence = False,
preconditioner = dict(
report = False,
#structure = "same_nonzero_pattern",
ilu = dict(fill_level = 1)))
)
gmres = dict(
reuse = False,
iterative = False,
lusolver = ("superlu_dist" if has_lu_solver_method("superlu_dist") else "default"),
luparams = dict(
symmetric = False,
same_nonzero_pattern = True,
reuse_factorization = False,),
ks = "gmres",
kp = "hypre_euclid",
kparams = dict(
maximum_iterations = 500,
monitor_convergence = False,
# large rel.tol. together with nonzero initial guess = bad idea!!!
relative_tolerance = 1e-12,
# absolute tolerance must not be too large compared with newton tol
# (but also not too low since that would be inefficient)
absolute_tolerance = 1e-5,
nonzero_initial_guess = True,
error_on_nonconvergence = False,
preconditioner = dict(
report = False,
#structure = "same_nonzero_pattern",
ilu = dict(fill_level = 1)))
)
| mitschabaude/nanopores | nanopores/tools/solvermethods.py | Python | mit | 3,451 |
# Given two strings, write a method to decide if one is a permutation of the other
def is_permutation(s, t):
'''
time complexity: O(NlogN)
'''
return sorted(s) == sorted(t)
def is_permutation(s, t):
'''
time complexity: O(N)
'''
from collections import Counter
return Counter(s) == Counter(t)
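# Quick sanity check, not part of the original solution file. Note that the
# second definition shadows the first, so the Counter-based O(N) version is
# the one that actually runs.
if __name__ == '__main__':
    assert is_permutation('listen', 'silent')
    assert not is_permutation('apple', 'pale')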
| carlxshen/interview-questions | ctci/chapter-1/1-2.py | Python | mit | 313 |
from __future__ import division
import sys
import os
import pyglet
from pyglet.gl import *
from pyglet.window import key
import mode
import gui
class MenuMode(mode.Mode):
name = "menu_mode"
def connect(self, controller):
super(MenuMode, self).connect(controller)
self.init_opengl()
self.init_menu()
self.bg = pyglet.sprite.Sprite(pyglet.resource.image('menu-bg.png'))
glClearColor(20/255, 20/255, 20/255, 1)
def disconnect(self):
self.bg.delete()
@property
def full_screen_label(self):
return 'Exit full screen' if self.window.fullscreen else 'Fullscreen'
def init_menu(self):
font = gui.MainMenuFont()
font2 = gui.GameMenuFont()
self.b_fullscreen = gui.Button(self.full_screen_label, font2, self.on_toggle_full_screen)
buttons = []
if len(sys.argv) > 1:
buttons.append(gui.Button('Play', font, self.on_play_pressed))
buttons.append(gui.Button('Edit', font, self.on_edit_pressed))
else:
buttons.append(gui.Button('New game', font, self.on_play_pressed))
#buttons.append(gui.Button('Continue', font, self.on_play_pressed))
buttons.append(self.b_fullscreen)
buttons.append(gui.Button('Exit', font2, self.on_exit_pressed))
self.gui.replace(buttons)
def on_toggle_full_screen(self, manager, args):
self.toggle_full_screen()
def toggle_full_screen(self):
self.window.set_fullscreen(not self.window.fullscreen)
self.window.set_minimum_size(800, 600)
self.b_fullscreen.label.text = self.full_screen_label
def on_play_pressed(self, manager, args):
self.control.switch_handler("game_mode", False)
def on_edit_pressed(self, manager, args):
self.control.switch_handler("game_mode", True)
def on_exit_pressed(self, manager, args):
sys.exit(0)
def on_draw(self):
self.window.clear()
self.bg.x = self.window.width - self.bg.width
self.bg.draw()
self.gui.draw()
def init_opengl(self):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0, self.window.width, 0, self.window.height)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
| vickenty/ookoobah | ookoobah/menu_mode.py | Python | mit | 2,284 |
import os
import sys
def pytest_configure(config):
os.environ['PYTHONPATH'] = ':'.join(sys.path)
| xsteadfastx/ftp_rsync_backup | conftest.py | Python | mit | 103 |
# -*- coding: utf-8 -*-
#
# Created on 2/7/16 by maersu
from core.utils import IOS
from django.utils.encoding import smart_str
from interactions.handlers.base import BaseWriter
import re
from translations.models import TranslatedItem
IOS_KEY_VALUE = re.compile(r'"(?P<key>.*?)"\s*?=\s*?"(?P<value>.*?)";', re.MULTILINE)
class IosParser(object):
@classmethod
def parse(cls, string, locale):
for match in IOS_KEY_VALUE.finditer(string):
key, value = match.groups()
TranslatedItem.get_or_create_from_text(key, value, system=IOS, locale=locale)
class IosWriter(BaseWriter):
@classmethod
def write(cls, string, locales_list, base_name, input_file):
contents = cls._get_contents(string, locales_list)
for match in IOS_KEY_VALUE.finditer(string):
key, value = match.groups()
for t in TranslatedItem.objects.filter(key=key, system=IOS):
for locale in locales_list:
contents[locale] = re.sub(
ur'"' + re.escape(key) + ur'"\s*=\s*".+";',
smart_str('"%s" = "%s";' % (key, t.get_display(locale))),
contents[locale]
)
return cls._write_files(contents, base_name, input_file)
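# A small illustration, not from the original handler, of what the
# IOS_KEY_VALUE pattern above matches in an iOS .strings file; the sample
# entries are made up.
sample = '"greeting" = "Hello";\n"farewell" = "Goodbye";'
pairs = {m.group('key'): m.group('value') for m in IOS_KEY_VALUE.finditer(sample)}
# pairs == {'greeting': 'Hello', 'farewell': 'Goodbye'}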
| placeB/translation-service | server/interactions/handlers/ios.py | Python | mit | 1,296 |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'image_uploader.settings')
app = Celery('image_uploader')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print("Request: {0!r}".format(self.request))
| haridas/image_uploader_py | image_uploader/celery.py | Python | mit | 416 |
import logging
import os
import time
import subprocess
def number_of_nodes():
_file = open(os.environ['HOME'] + "/machinefile", "r")
n = len([l for l in _file.readlines() if l.strip(' \n') != ''])
_file.close()
return n
def log(msg):
logging.debug("COMPUTATION: " + msg)
return
def computation_unit(reconfiguration_port, computation_input):
# wait until platform is ready.
while (reconfiguration_port.get_actuator().value != "start"):
time.sleep(5)
log("Starting Computation.")
# compute the matrix
inputs = [ int(x) for x in computation_input.split(':') ]
inputs_size = len(inputs)
home = os.environ['HOME']
for i in range(len(inputs)) :
m = inputs[i]
log("Start (MatrixSize, Iteration) = |" + str(m) + "|" + str(i) +"|")
with reconfiguration_port.machine_file_lock:
nodes = 2 * number_of_nodes()
command = ["mpirun",
"-n", str(nodes), "-machinefile", home + "/machinefile",
home + "/repositorios/elastichpc/beta/trials/Matrix.py",
str(m), home + "/teste.mtr_" + str(i)]
log(str(command))
process = subprocess.Popen(command, stdout = subprocess.PIPE, stderr=subprocess.STDOUT)
(output, error) = process.communicate()
log("End (MatrixSize, Iteration) = |" + str(m) + "|" + str(i) +"|")
os.remove(home + "/teste.mtr_" + str(i))
log("Execution = " + str(output) + "|" + str(error))
reconfiguration_port.get_sensor().value = float(i + 1) / inputs_size
log("Progress = " + str(float(i + 1) /inputs_size))
# finish the computation
reconfiguration_port.get_actuator().value = "finished"
log("Finish Computation.")
| jmhal/elastichpc | beta/trials/static/Computation.py | Python | mit | 1,718 |
"""
Tests the methods within the flask-script file manage.py
"""
import os
import unittest
import testing.postgresql
from biblib.app import create_app
from biblib.manage import CreateDatabase, DestroyDatabase, DeleteStaleUsers
from biblib.models import Base, User, Library, Permissions
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.orm.exc import NoResultFound
from biblib.tests.base import TestCaseDatabase
class TestManagePy(TestCaseDatabase):
"""
Class for testing the behaviour of the custom manage scripts
"""
"""
Base test class for when databases are being used.
"""
def test_create_database(self):
"""
Tests the CreateDatabase action. This should create all the tables
that should exist in the database.
:return: no return
"""
# Setup the tables
CreateDatabase.run(app=self.app)
engine = create_engine(TestManagePy.postgresql_url)
connection = engine.connect()
for model in [User, Library, Permissions]:
exists = engine.dialect.has_table(connection, model.__tablename__)
self.assertTrue(exists)
# Clean up the tables
Base.metadata.drop_all(bind=self.app.db.engine)
def test_destroy_database(self):
"""
Tests the DestroyDatabase action. This should clear all the tables
that were created in the database.
:return: no return
"""
# Setup the tables
engine = create_engine(TestManagePy.postgresql_url)
connection = engine.connect()
Base.metadata.create_all(bind=self.app.db.engine)
for model in [User, Library, Permissions]:
exists = engine.dialect.has_table(connection, model.__tablename__)
self.assertTrue(exists)
DestroyDatabase.run(app=self.app)
for model in [User, Library, Permissions]:
exists = engine.dialect.has_table(connection, model.__tablename__)
self.assertFalse(exists)
def test_delete_stale_users(self):
"""
Tests that the DeleteStaleUsers action propagates the deletion of
users from the API database to that of the microservice.
:return: no return
"""
with self.app.session_scope() as session:
# We do not add user 1 to the API database
session.execute('create table users (id integer, random integer);')
session.execute('insert into users (id, random) values (2, 7);')
session.commit()
with self.app.session_scope() as session:
try:
# Add some content to the users, libraries, and permissions within
# the microservices
user_1 = User(absolute_uid=1)
session.add(user_1)
session.commit()
user_2 = User(absolute_uid=2)
library_1 = Library(name='Lib1')
library_2 = Library(name='Lib2')
session.add_all([
user_1, user_2,
library_1, library_2
])
session.commit()
# Make some permissions
# User 1 owns library 1 and can read library 2
# User 2 owns library 2 and can read library 1
permission_user_1_library_1 = Permissions(
permissions={'read': False, 'write': False, 'admin': False, 'owner': True},
library_id=library_1.id,
user_id=user_1.id
)
permission_user_1_library_2 = Permissions(
permissions={'read': True, 'write': False, 'admin': False, 'owner': False},
library_id=library_2.id,
user_id=user_1.id
)
permission_user_2_library_1 = Permissions(
permissions={'read': True, 'write': False, 'admin': False, 'owner': False},
library_id=library_1.id,
user_id=user_2.id
)
permission_user_2_library_2 = Permissions(
permissions={'read': False, 'write': False, 'admin': False, 'owner': True},
library_id=library_2.id,
user_id=user_2.id
)
session.add_all([
permission_user_1_library_1, permission_user_1_library_2,
permission_user_2_library_1, permission_user_2_library_2
])
session.commit()
# Retain some IDs for when they are deleted
user_1_id = user_1.id
user_2_id = user_2.id
user_1_absolute_uid = user_1.absolute_uid
library_1_id = library_1.id
library_2_id = library_2.id
# Now run the stale deletion
DeleteStaleUsers().run(app=self.app)
# Check the state of users, libraries and permissions
# User 2
# 1. the user 2 should still exist
# 2. library 2 should exist
# 3. the permissions for library 2 for user 2 should exist
# 4. the permissions for library 1 for user 2 should not exist
_user_2 = session.query(User).filter(User.absolute_uid == 2).one()
self.assertIsInstance(_user_2, User)
_library_2 = session.query(Library)\
.filter(Library.id == library_2_id)\
.one()
self.assertIsInstance(_library_2, Library)
_permission_user_2_library_2 = session.query(Permissions)\
.filter(Permissions.library_id == library_2_id)\
.filter(Permissions.user_id == user_2_id)\
.one()
self.assertIsInstance(_permission_user_2_library_2, Permissions)
with self.assertRaises(NoResultFound):
session.query(Permissions)\
.filter(Permissions.library_id == library_1_id)\
.filter(Permissions.user_id == user_2_id)\
.one()
# User 1
# 1. the user should not exist
# 2. library 1 should not exist
# 3. the permissions for library 1 for user 1 should not exist
# 4. the permissions for library 2 for user 1 should not exist
with self.assertRaises(NoResultFound):
session.query(User)\
.filter(User.absolute_uid == user_1_absolute_uid).one()
with self.assertRaises(NoResultFound):
session.query(Library)\
.filter(Library.id == library_1_id)\
.one()
with self.assertRaises(NoResultFound):
session.query(Permissions)\
.filter(Permissions.library_id == library_1_id)\
.filter(Permissions.user_id == user_1_id)\
.one()
with self.assertRaises(NoResultFound):
session.query(Permissions)\
.filter(Permissions.library_id == library_2_id)\
.filter(Permissions.user_id == user_1_id)\
.one()
except Exception:
raise
finally:
# Destroy the tables
session.execute('drop table users;')
pass
if __name__ == '__main__':
unittest.main(verbosity=2)
| adsabs/biblib-service | biblib/tests/unit_tests/test_manage.py | Python | mit | 7,727 |
# This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Matches existing metadata with canonical information to identify
releases and tracks.
"""
from __future__ import division
import logging
import re
from munkres import Munkres
from unidecode import unidecode
from beets import plugins
from beets.util import levenshtein, plurality
from beets.autotag import hooks
# Distance parameters.
# Text distance weights: proportions on the normalized intuitive edit
# distance.
ARTIST_WEIGHT = 3.0
ALBUM_WEIGHT = 3.0
# The weight of the entire distance calculated for a given track.
TRACK_WEIGHT = 1.0
# The weight of a missing track.
MISSING_WEIGHT = 0.9
# These distances are components of the track distance (that is, they
# compete against each other but not ARTIST_WEIGHT and ALBUM_WEIGHT;
# the overall TRACK_WEIGHT does that).
TRACK_TITLE_WEIGHT = 3.0
# Used instead of a global artist penalty for various-artist matches.
TRACK_ARTIST_WEIGHT = 2.0
# Added when the indices of tracks don't match.
TRACK_INDEX_WEIGHT = 1.0
# Track length weights: no penalty before GRACE, maximum (WEIGHT)
# penalty at GRACE+MAX discrepancy.
TRACK_LENGTH_GRACE = 10
TRACK_LENGTH_MAX = 30
TRACK_LENGTH_WEIGHT = 2.0
# MusicBrainz track ID matches.
TRACK_ID_WEIGHT = 5.0
# Parameters for string distance function.
# Words that can be moved to the end of a string using a comma.
SD_END_WORDS = ['the', 'a', 'an']
# Reduced weights for certain portions of the string.
SD_PATTERNS = [
(r'^the ', 0.1),
(r'[\[\(]?(ep|single)[\]\)]?', 0.0),
(r'[\[\(]?(featuring|feat|ft)[\. :].+', 0.1),
(r'\(.*?\)', 0.3),
(r'\[.*?\]', 0.3),
(r'(, )?(pt\.|part) .+', 0.2),
]
# Replacements to use before testing distance.
SD_REPLACE = [
(r'&', 'and'),
]
# Recommendation constants.
RECOMMEND_STRONG = 'RECOMMEND_STRONG'
RECOMMEND_MEDIUM = 'RECOMMEND_MEDIUM'
RECOMMEND_NONE = 'RECOMMEND_NONE'
# Thresholds for recommendations.
STRONG_REC_THRESH = 0.04
MEDIUM_REC_THRESH = 0.25
REC_GAP_THRESH = 0.25
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
# release and also on the track level to to remove the penalty for
# differing artists.
VA_ARTISTS = (u'', u'various artists', u'va', u'unknown')
# Autotagging exceptions.
class AutotagError(Exception):
pass
# Global logger.
log = logging.getLogger('beets')
# Primary matching functionality.
def _string_dist_basic(str1, str2):
"""Basic edit distance between two strings, ignoring
non-alphanumeric characters and case. Comparisons are based on a
transliteration/lowering to ASCII characters. Normalized by string
length.
"""
str1 = unidecode(str1)
str2 = unidecode(str2)
str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
str2 = re.sub(r'[^a-z0-9]', '', str2.lower())
if not str1 and not str2:
return 0.0
return levenshtein(str1, str2) / float(max(len(str1), len(str2)))
def string_dist(str1, str2):
"""Gives an "intuitive" edit distance between two strings. This is
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
"""
str1 = str1.lower()
str2 = str2.lower()
# Don't penalize strings that move certain words to the end. For
# example, "the something" should be considered equal to
# "something, the".
for word in SD_END_WORDS:
if str1.endswith(', %s' % word):
str1 = '%s %s' % (word, str1[:-len(word)-2])
if str2.endswith(', %s' % word):
str2 = '%s %s' % (word, str2[:-len(word)-2])
# Perform a couple of basic normalizing substitutions.
for pat, repl in SD_REPLACE:
str1 = re.sub(pat, repl, str1)
str2 = re.sub(pat, repl, str2)
# Change the weight for certain string portions matched by a set
# of regular expressions. We gradually change the strings and build
# up penalties associated with parts of the string that were
# deleted.
base_dist = _string_dist_basic(str1, str2)
penalty = 0.0
for pat, weight in SD_PATTERNS:
# Get strings that drop the pattern.
case_str1 = re.sub(pat, '', str1)
case_str2 = re.sub(pat, '', str2)
if case_str1 != str1 or case_str2 != str2:
# If the pattern was present (i.e., it is deleted in the
# current case), recalculate the distances for the
# modified strings.
case_dist = _string_dist_basic(case_str1, case_str2)
case_delta = max(0.0, base_dist - case_dist)
if case_delta == 0.0:
continue
# Shift our baseline strings down (to avoid rematching the
# same part of the string) and add a scaled distance
# amount to the penalties.
str1 = case_str1
str2 = case_str2
base_dist = case_dist
penalty += weight * case_delta
dist = base_dist + penalty
return dist
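# Illustrative usage of string_dist (added as a sketch; not part of the original
# module). The end-word handling above means a trailing ", the" costs nothing,
# while unmatched extra text still does:
# >>> string_dist(u'The Beatles', u'Beatles, The')
# 0.0
# >>> string_dist(u'Help!', u'Help! (EP)') < string_dist(u'Help!', u'Helped')
# True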
def current_metadata(items):
"""Returns the most likely artist and album for a set of Items.
Each is determined by tag reflected by the plurality of the Items.
"""
keys = 'artist', 'album'
likelies = {}
consensus = {}
for key in keys:
values = [getattr(item, key) for item in items if item]
likelies[key], freq = plurality(values)
consensus[key] = (freq == len(values))
return likelies['artist'], likelies['album'], consensus['artist']
def order_items(items, trackinfo):
"""Orders the items based on how they match some canonical track
information. Returns a list of Items whose length is equal to the
length of ``trackinfo``. This always produces a result if the
number of items is at most the number of TrackInfo objects
(otherwise, returns None). In the case of a partial match, the
returned list may contain None in some positions.
"""
# Make sure lengths are compatible: if there are fewer items, it might
# just be that some tracks are missing.
if len(items) > len(trackinfo):
return None
# Construct the cost matrix.
costs = []
for cur_item in items:
row = []
for i, canon_item in enumerate(trackinfo):
row.append(track_distance(cur_item, canon_item, i+1))
costs.append(row)
# Find a minimum-cost bipartite matching.
matching = Munkres().compute(costs)
# Order items based on the matching.
ordered_items = [None]*len(trackinfo)
for cur_idx, canon_idx in matching:
ordered_items[canon_idx] = items[cur_idx]
return ordered_items
def track_distance(item, track_info, track_index=None, incl_artist=False):
"""Determines the significance of a track metadata change. Returns
a float in [0.0,1.0]. `track_index` is the track number of the
`track_info` metadata set. If `track_index` is provided and
item.track is set, then these indices are used as a component of
the distance calculation. `incl_artist` indicates that a distance
component should be included for the track artist (i.e., for
various-artist releases).
"""
# Distance and normalization accumulators.
dist, dist_max = 0.0, 0.0
# Check track length.
# If there's no length to check, apply no penalty.
if track_info.length:
diff = abs(item.length - track_info.length)
diff = max(diff - TRACK_LENGTH_GRACE, 0.0)
diff = min(diff, TRACK_LENGTH_MAX)
dist += (diff / TRACK_LENGTH_MAX) * TRACK_LENGTH_WEIGHT
dist_max += TRACK_LENGTH_WEIGHT
# Track title.
dist += string_dist(item.title, track_info.title) * TRACK_TITLE_WEIGHT
dist_max += TRACK_TITLE_WEIGHT
# Track artist, if included.
# Attention: MB DB does not have artist info for all compilations,
# so only check artist distance if there is actually an artist in
# the MB track data.
if incl_artist and track_info.artist and \
item.artist.lower() not in VA_ARTISTS:
dist += string_dist(item.artist, track_info.artist) * \
TRACK_ARTIST_WEIGHT
dist_max += TRACK_ARTIST_WEIGHT
# Track index.
if track_index and item.track:
if item.track not in (track_index, track_info.medium_index):
dist += TRACK_INDEX_WEIGHT
dist_max += TRACK_INDEX_WEIGHT
# MusicBrainz track ID.
if item.mb_trackid:
if item.mb_trackid != track_info.track_id:
dist += TRACK_ID_WEIGHT
dist_max += TRACK_ID_WEIGHT
# Plugin distances.
plugin_d, plugin_dm = plugins.track_distance(item, track_info)
dist += plugin_d
dist_max += plugin_dm
return dist / dist_max
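# Note on the normalization above (added for illustration, not in the original
# module): with an exact title match, no length or MusicBrainz ID data and no
# plugin penalties, dist stays 0.0 while dist_max accumulates TRACK_TITLE_WEIGHT,
# so the result is 0.0; a completely different title pushes that component
# towards 1.0 instead.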
def distance(items, album_info):
"""Determines how "significant" an album metadata change would be.
Returns a float in [0.0,1.0]. The list of items must be ordered.
"""
cur_artist, cur_album, _ = current_metadata(items)
cur_artist = cur_artist or ''
cur_album = cur_album or ''
# These accumulate the possible distance components. The final
# distance will be dist/dist_max.
dist = 0.0
dist_max = 0.0
# Artist/album metadata.
if not album_info.va:
dist += string_dist(cur_artist, album_info.artist) * ARTIST_WEIGHT
dist_max += ARTIST_WEIGHT
dist += string_dist(cur_album, album_info.album) * ALBUM_WEIGHT
dist_max += ALBUM_WEIGHT
# Track distances.
for i, (item, track_info) in enumerate(zip(items, album_info.tracks)):
if item:
dist += track_distance(item, track_info, i+1, album_info.va) * \
TRACK_WEIGHT
dist_max += TRACK_WEIGHT
else:
dist += MISSING_WEIGHT
dist_max += MISSING_WEIGHT
# Plugin distances.
plugin_d, plugin_dm = plugins.album_distance(items, album_info)
dist += plugin_d
dist_max += plugin_dm
# Normalize distance, avoiding divide-by-zero.
if dist_max == 0.0:
return 0.0
else:
return dist/dist_max
def match_by_id(items):
"""If the items are tagged with a MusicBrainz album ID, returns an
info dict for the corresponding album. Otherwise, returns None.
"""
# Is there a consensus on the MB album ID?
albumids = [item.mb_albumid for item in items if item.mb_albumid]
if not albumids:
log.debug('No album IDs found.')
return None
# If all album IDs are equal, look up the album.
if bool(reduce(lambda x,y: x if x==y else (), albumids)):
albumid = albumids[0]
log.debug('Searching for discovered album ID: ' + albumid)
return hooks._album_for_id(albumid)
else:
log.debug('No album ID consensus.')
return None
# FIXME: In the future, at the expense of performance, we could use
# other IDs (i.e., track and artist) in case the album tag isn't
# present, but that event seems very unlikely.
def recommendation(results):
"""Given a sorted list of result tuples, returns a recommendation
flag (RECOMMEND_STRONG, RECOMMEND_MEDIUM, RECOMMEND_NONE) based
on the results' distances.
"""
if not results:
# No candidates: no recommendation.
rec = RECOMMEND_NONE
else:
min_dist = results[0][0]
if min_dist < STRONG_REC_THRESH:
# Strong recommendation level.
rec = RECOMMEND_STRONG
elif len(results) == 1:
# Only a single candidate. Medium recommendation.
rec = RECOMMEND_MEDIUM
elif min_dist <= MEDIUM_REC_THRESH:
# Medium recommendation level.
rec = RECOMMEND_MEDIUM
elif results[1][0] - min_dist >= REC_GAP_THRESH:
# Gap between first two candidates is large.
rec = RECOMMEND_MEDIUM
else:
# No conclusion.
rec = RECOMMEND_NONE
return rec
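# Worked example of the thresholds above (illustrative, made-up distances):
# a best candidate at distance 0.03 (< STRONG_REC_THRESH) gives RECOMMEND_STRONG;
# a best distance of 0.20 gives RECOMMEND_MEDIUM because it is below
# MEDIUM_REC_THRESH; candidates at 0.30 and 0.35 give RECOMMEND_NONE because the
# best is above the threshold and the gap is smaller than REC_GAP_THRESH.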
def validate_candidate(items, tuple_dict, info):
"""Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of result tuples. This involves checking
the track count, ordering the items, checking for duplicates, and
calculating the distance.
"""
log.debug('Candidate: %s - %s' % (info.artist, info.album))
# Don't duplicate.
if info.album_id in tuple_dict:
log.debug('Duplicate.')
return
# Make sure the album has the correct number of tracks.
if len(items) > len(info.tracks):
log.debug('Too many items to match: %i > %i.' %
(len(items), len(info.tracks)))
return
# Put items in order.
ordered = order_items(items, info.tracks)
if not ordered:
log.debug('Not orderable.')
return
# Get the change distance.
dist = distance(ordered, info)
log.debug('Success. Distance: %f' % dist)
tuple_dict[info.album_id] = dist, ordered, info
def tag_album(items, timid=False, search_artist=None, search_album=None,
search_id=None):
"""Bundles together the functionality used to infer tags for a
set of items comprised by an album. Returns everything relevant:
- The current artist.
- The current album.
- A list of (distance, items, info) tuples where info is a
dictionary containing the inferred tags and items is a
reordered version of the input items list. The candidates are
sorted by distance (i.e., best match first).
- A recommendation, one of RECOMMEND_STRONG, RECOMMEND_MEDIUM,
or RECOMMEND_NONE, indicating whether the first candidate is
very likely correct, somewhat likely correct, or whether no
conclusion could be reached.
If search_artist and search_album or search_id are provided, then
they are used as search terms in place of the current metadata.
May raise an AutotagError if existing metadata is insufficient.
"""
# Get current metadata.
cur_artist, cur_album, artist_consensus = current_metadata(items)
log.debug('Tagging %s - %s' % (cur_artist, cur_album))
# The output result (distance, items, AlbumInfo) tuples (keyed by MB
# album ID).
candidates = {}
# Try to find album indicated by MusicBrainz IDs.
if search_id:
log.debug('Searching for album ID: ' + search_id)
id_info = hooks._album_for_id(search_id)
else:
id_info = match_by_id(items)
if id_info:
validate_candidate(items, candidates, id_info)
rec = recommendation(candidates.values())
log.debug('Album ID match recommendation is ' + str(rec))
if candidates and not timid:
# If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based
# matches.
if rec == RECOMMEND_STRONG:
log.debug('ID match.')
return cur_artist, cur_album, candidates.values(), rec
# If searching by ID, don't continue to metadata search.
if search_id is not None:
if candidates:
return cur_artist, cur_album, candidates.values(), rec
else:
return cur_artist, cur_album, [], RECOMMEND_NONE
# Search terms.
if not (search_artist and search_album):
# No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album
log.debug(u'Search terms: %s - %s' % (search_artist, search_album))
# Is this album likely to be a "various artist" release?
va_likely = ((not artist_consensus) or
(search_artist.lower() in VA_ARTISTS) or
any(item.comp for item in items))
log.debug(u'Album might be VA: %s' % str(va_likely))
# Get the results from the data sources.
search_cands = hooks._album_candidates(items, search_artist, search_album,
va_likely)
log.debug(u'Evaluating %i candidates.' % len(search_cands))
for info in search_cands:
validate_candidate(items, candidates, info)
# Sort and get the recommendation.
candidates = sorted(candidates.itervalues())
rec = recommendation(candidates)
return cur_artist, cur_album, candidates, rec
def tag_item(item, timid=False, search_artist=None, search_title=None,
search_id=None):
"""Attempts to find metadata for a single track. Returns a
`(candidates, recommendation)` pair where `candidates` is a list
of `(distance, track_info)` pairs. `search_artist` and
`search_title` may be used to override the current metadata for
the purposes of the MusicBrainz search; likewise `search_id`.
"""
# Holds candidates found so far: keys are MBIDs; values are
# (distance, TrackInfo) pairs.
candidates = {}
# First, try matching by MusicBrainz ID.
trackid = search_id or item.mb_trackid
if trackid:
log.debug('Searching for track ID: ' + trackid)
track_info = hooks._track_for_id(trackid)
if track_info:
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = (dist, track_info)
# If this is a good match, then don't keep searching.
rec = recommendation(candidates.values())
if rec == RECOMMEND_STRONG and not timid:
log.debug('Track ID match.')
return candidates.values(), rec
# If we're searching by ID, don't proceed.
if search_id is not None:
if candidates:
return candidates.values(), rec
else:
return [], RECOMMEND_NONE
# Search terms.
if not (search_artist and search_title):
search_artist, search_title = item.artist, item.title
log.debug(u'Item search terms: %s - %s' % (search_artist, search_title))
# Get and evaluate candidate metadata.
for track_info in hooks._item_candidates(item, search_artist, search_title):
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = (dist, track_info)
# Sort by distance and return with recommendation.
log.debug('Found %i candidates.' % len(candidates))
candidates = sorted(candidates.itervalues())
rec = recommendation(candidates)
return candidates, rec
|
aspidites/beets
|
beets/autotag/match.py
|
Python
|
mit
| 18,831
|
"""
Name: sdeconn.py
Description: Utility functions for sde connections
Author: blord-castillo (http://gis.stackexchange.com/users/3386/blord-castillo)
Do on the fly connections in python using Sql Server direct connect only.
Eliminates the problem of database connection files being inconsistent from
machine to machine or user profile to user profile.
Usage:
import arcpy, sdeconn
myconnect1 = sdeconn.connect("database1", "server")
myconnect2 = sdeconn.connect("database2", "server")
Source:
http://gis.stackexchange.com/questions/16859/define-workspace-for-sde-connection-in-python
"""
# Import system modules
import arcpy, os, sys
def connect(database, server="<default server>", username="<default user>", password="<default password>", version="SDE.DEFAULT"):
# Check if value entered for option
try:
#Usage parameters for spatial database connection to upgrade
service = "sde:sqlserver:" + server
account_authentication = 'DATABASE_AUTH'
version = version.upper()
database = database.lower()
# Check if direct connection
if service.find(":") <> -1: #This is direct connect
ServiceConnFileName = service.replace(":", "")
ServiceConnFileName = ServiceConnFileName.replace(";", "")
ServiceConnFileName = ServiceConnFileName.replace("=", "")
ServiceConnFileName = ServiceConnFileName.replace("/", "")
ServiceConnFileName = ServiceConnFileName.replace("\\", "")
else:
arcpy.AddMessage("\n+++++++++")
arcpy.AddMessage("Exiting!!")
arcpy.AddMessage("+++++++++")
sys.exit("\nSyntax for a direct connection in the Service parameter is required for geodatabase upgrade.")
# Local variables
Conn_File_NameT = server + "_" + ServiceConnFileName + "_" + database + "_" + username
if os.environ.get("TEMP") == None:
temp = "c:\\temp"
else:
temp = os.environ.get("TEMP")
if os.environ.get("TMP") == None:
temp = "/usr/tmp"
else:
temp = os.environ.get("TMP")
Connection_File_Name = temp + os.sep + Conn_File_NameT + ".sde"
if os.path.isfile(Connection_File_Name):
return Connection_File_Name
# Allow any stale .sde connection file to be overwritten when it is recreated
arcpy.env.overwriteOutput=True
# Variables defined within the script; other variable options commented out at the end of the line
saveUserInfo = "SAVE_USERNAME" #DO_NOT_SAVE_USERNAME
saveVersionInfo = "SAVE_VERSION" #DO_NOT_SAVE_VERSION
print "\nCreating ArcSDE Connection File...\n"
# Process: Create ArcSDE Connection File...
# Usage: out_folder_path, out_name, server, service, database, account_authentication, username, password, save_username_password, version, save_version_info
print temp
print Conn_File_NameT
print server
print service
print database
print account_authentication
print username
print password
print saveUserInfo
print version
print saveVersionInfo
arcpy.CreateArcSDEConnectionFile_management(temp, Conn_File_NameT, server, service, database, account_authentication, username, password, saveUserInfo, version, saveVersionInfo)
for i in range(arcpy.GetMessageCount()):
if "000565" in arcpy.GetMessage(i): #Check if database connection was successful
arcpy.AddReturnMessage(i)
arcpy.AddMessage("\n+++++++++")
arcpy.AddMessage("Exiting!!")
arcpy.AddMessage("+++++++++\n")
sys.exit(3)
else:
arcpy.AddReturnMessage(i)
arcpy.AddMessage("+++++++++\n")
return Connection_File_Name
#Check if no value entered for option
except SystemExit as e:
print e.code
return
def listFcsInGDB():
''' set your arcpy.env.workspace to a gdb before calling '''
for fds in arcpy.ListDatasets('','feature') + ['']:
for fc in arcpy.ListFeatureClasses('','',fds):
yield os.path.join(arcpy.env.workspace, fds, fc)
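# Illustrative usage of the generator above (hypothetical geodatabase path):
# arcpy.env.workspace = r"C:\temp\example.gdb"
# for fc in listFcsInGDB():
# print fc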
if __name__ == '__main__':
print "started main"
database = arcpy.GetParameterAsText(0)
server = arcpy.GetParameterAsText(1)
username = arcpy.GetParameterAsText(2)
password = arcpy.GetParameterAsText(3)
version = "SDE.DEFAULT"
print "make connection file"
sde = connect(database, server, username, password, version)
print sde
arcpy.env.workspace = sde
print arcpy.env.workspace
for fc in listFcsInGDB():
print fc
print arcpy.GetMessages()
|
DougFirErickson/arcplus
|
ArcToolbox/Scripts/sdeconn.py
|
Python
|
mit
| 4,782
|
import numpy
from chainer.backends import cuda
from chainer.functions.loss import black_out
from chainer import link
from chainer.utils import walker_alias
from chainer import variable
class BlackOut(link.Link):
"""BlackOut loss layer.
.. seealso:: :func:`~chainer.functions.black_out` for more detail.
Args:
in_size (int): Dimension of input vectors.
counts (int list): Frequency count of each identifier.
sample_size (int): Number of negative samples.
Attributes:
W (~chainer.Parameter): Weight parameter matrix.
"""
sample_data = None
def __init__(self, in_size, counts, sample_size):
super(BlackOut, self).__init__()
vocab_size = len(counts)
p = numpy.array(counts, dtype=numpy.float32)
self.sampler = walker_alias.WalkerAlias(p)
self.sample_size = sample_size
with self.init_scope():
self.W = variable.Parameter(shape=(vocab_size, in_size))
def to_cpu(self):
super(BlackOut, self).to_cpu()
self.sampler.to_cpu()
def to_gpu(self, device=None):
with cuda._get_device(device):
super(BlackOut, self).to_gpu()
self.sampler.to_gpu()
def forward(self, x, t):
"""Computes the loss value for given input and ground truth labels.
Args:
x (~chainer.Variable): Input of the weight matrix multiplication.
t (~chainer.Variable): Batch of ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
batch_size = x.shape[0]
if self.sample_data is not None:
# for test
sample_data = self.sample_data
else:
shape = (batch_size, self.sample_size)
sample_data = self.sampler.sample(shape)
samples = variable.Variable(sample_data)
return black_out.black_out(x, t, self.W, samples)
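# Minimal usage sketch (hypothetical sizes and counts; not part of the original
# file): a three-word vocabulary with frequencies [5, 3, 2], 100-dimensional
# inputs and two negative samples per example.
# loss_fn = BlackOut(in_size=100, counts=[5, 3, 2], sample_size=2)
# loss = loss_fn(x, t) # x: (batch, 100) float Variable, t: (batch,) int labels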
|
ronekko/chainer
|
chainer/links/loss/black_out.py
|
Python
|
mit
| 1,916
|
#!/usr/bin/env python
"""
--------------------------------------------------------------------------------
Created: Jackson Lee 1/28/14
This script reads in a fasta file and a tab delimited text file of annotations
and replaces the header line with matched annotations
Input fasta file format:
any fasta file
Input annotations file format:
tab delimited headings of annotation names (one word each), with the search string
in the first column followed by tab delimited annotation data
Output
fasta file with the header replaced; a "not found" file listing entries without a
match is written as 'outfile.notfound.*'
--------------------------------------------------------------------------------
usage: add_annotations_to_fasta.py -i in.fasta -a annotations.txt -o out.file
"""
#-------------------------------------------------------------------------------
#Header - Linkers, Libs, Constants
from string import strip
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from Bio import SeqIO
#-------------------------------------------------------------------------------
#function declarations
def add_annot(records, notfoundfile):
"""this is a generator function to create the new record if needed"""
for record in records:
if record.id in all_annotations:
record.description = '\t'.join([header + '=[' + annotation + ']' for header, annotation in zip(all_headers, all_annotations[record.id])])
else:
notfoundfile.write(record.id + '\n')
record.description = ''
yield record
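# Illustrative example of the header rewriting above (hypothetical values):
# with all_headers = ['gene', 'function'] and an annotation row giving
# all_annotations['contig_1'] = ['abc', 'kinase'], a record with id 'contig_1'
# gets the description "gene=[abc]\tfunction=[kinase]" (tab separated).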
#-------------------------------------------------------------------------------
#Body
print "Running..."
if __name__ == '__main__':
parser = ArgumentParser(usage = "add_annotations_to_fasta.py -i in.fasta -a \
annotations.txt -o out.file",
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-i", "--input_file", action="store",
dest="inputfilename", help="text fasta file")
parser.add_argument("-a", "--annotations_text", action="store",
dest="annotationfilename", help="tab-delimited annotations \
text file, first line headings, followed by entries for each sequence")
parser.add_argument("-o", "--output_file", action="store",
dest="outputfilename", help="fasta output file")
options = parser.parse_args()
mandatories = ["inputfilename", "annotationfilename", "outputfilename"]
for m in mandatories:
if not options.__dict__[m]:
print "\nError: Missing Arguments\n"
parser.print_help()
exit(-1)
inputfilename = options.inputfilename
annotationfilename = options.annotationfilename
outputfilename = options.outputfilename
outputfilenameprefix, dummy, extension = outputfilename.rpartition('.')
notfoundfilename = outputfilenameprefix + '.notfound.txt'
print "Reading annotations..."
with open(annotationfilename) as annotationfile:
#build a list of annotation dictionaries
all_annotations = {}
all_headers = annotationfile.next().strip().split('\t')[1:]
for line in annotationfile:
linelist = line.strip().split('\t')
key = linelist[0]
value = linelist[1:]
all_annotations[key] = value
print "reading sequence files and adding annotations..."
with open(inputfilename, 'U') as inputfile, open(outputfilename, 'w') as outputfile, \
open(notfoundfilename, 'w') as notfoundfile:
input_seq_iterator = SeqIO.parse(inputfile, "fasta")
SeqIO.write(add_annot(input_seq_iterator, notfoundfile), outputfile, "fasta")
print "Done!"
|
leejz/misc-scripts
|
add_annotations_to_fasta.py
|
Python
|
mit
| 3,801
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import farms.models
class Migration(migrations.Migration):
dependencies = [
('farms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='cropseason',
name='season_end_date',
field=models.DateField(default=farms.models.get_default_cropseason_end, verbose_name=b'Approximate Season End Date'),
preserve_default=True,
),
migrations.AlterField(
model_name='cropseason',
name='season_start_date',
field=models.DateField(default=farms.models.get_default_cropseason_start, verbose_name=b'Season Start Date'),
preserve_default=True,
),
migrations.AlterField(
model_name='cropseasonevent',
name='date',
field=models.DateField(default=farms.models.get_default_cropseasonevent_date),
preserve_default=True,
),
]
|
warnes/irrigatorpro
|
irrigator_pro/farms/migrations/0002_auto_20150513_0054.py
|
Python
|
mit
| 1,045
|
import unittest
from katas.kyu_7.ninja_vs_samurai_strike import Warrior
class WarriorTestCase(unittest.TestCase):
def setUp(self):
self.ninja = Warrior('Ninja')
self.samurai = Warrior('Samurai')
def test_equals(self):
self.samurai.strike(self.ninja, 3)
self.assertEqual(self.ninja.health, 70)
|
the-zebulan/CodeWars
|
tests/kyu_7_tests/test_ninja_vs_samurai_strike.py
|
Python
|
mit
| 337
|
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018, Ansible Project
from ansiblelint import AnsibleLintRule
class LineTooLongRule(AnsibleLintRule):
id = '204'
shortdesc = 'Lines should be no longer than 120 chars'
description = (
'Long lines make code harder to read and '
'code review more difficult'
)
severity = 'VERY_LOW'
tags = ['formatting']
version_added = 'v4.0.0'
def match(self, file, line):
return len(line) > 120
|
MatrixCrawler/ansible-lint
|
lib/ansiblelint/rules/LineTooLongRule.py
|
Python
|
mit
| 506
|
import os
import pytest
import yaml
from yml_config import Config
@pytest.fixture()
def data():
return {
'bool': False,
'number': 1,
'string': 'test_string',
'sequence': ['item1', 'item2', 'item3'],
'mapping': {
'key1': 'value1',
'key2': {
'key2a': 'value2a',
'key2b': 'value2b'
},
'key3': ['value3a', 'value3b', 'value3c'],
'key4': True
}
}
@pytest.fixture()
def config_dict(data):
return Config(data)
@pytest.fixture()
def config_yml(data):
with open('config.yml', 'w') as fp:
yaml.dump(data=data, stream=fp)
yield Config.from_yaml(path='config.yml')
os.remove('config.yml')
class TestConfig:
def test_init_config(self, config_dict):
assert 'mapping' in config_dict
def test_init_config_yml(self, config_yml):
assert 'mapping' in config_yml
def test_init_config_bad_file(self):
with pytest.raises(IOError):
Config.from_yaml(path='badfile')
def test_dump_to_env(self, config_yml):
config_yml.to_env()
assert 'BOOL' in os.environ
assert 'SEQUENCE' in os.environ
assert 'MAPPING_KEY1' in os.environ
def test_retrieve_variable_success(self, config_yml):
test_value = 'test_value'
os.environ['TEST_KEY'] = test_value
assert config_yml('TEST_KEY') == test_value
def test_retrieve_variable_fail(self, config_yml):
assert config_yml('WRONG_KEY', 'wrong_value') == 'wrong_value'
assert config_yml('WRONG_KEY') == ''
def test_set_variable_success(self, config_yml):
test_value = 'test_value'
config_yml['SET_KEY'] = test_value
assert 'SET_KEY' in os.environ
assert os.environ['SET_KEY'] == test_value
assert 'SET_KEY' in config_yml
assert config_yml['SET_KEY'] == test_value
|
bvujicic/yml-to-env
|
tests/test_config.py
|
Python
|
mit
| 1,945
|
import re
class FeaToolsParserSyntaxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# used for removing all comments
commentRE = re.compile("#.*")
# used for finding all strings
stringRE = re.compile(
"\"" # "
"([^\"]*)" # anything but "
"\"" # "
)
# used for finding all statement terminators
terminatorRE = re.compile(";")
# used for finding all feature names.
feature_findAll_RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"feature\s+" # feature
"([\w\d]{4})" # name
"\s*{" # {
)
# used for finding the content of features.
# this regular expression will be compiled
# for each feature name found.
featureContentRE = [
"([\s;\{\}]|^)", # whitepace, ; {, } or start of line
"feature\s+", # feature
# feature name # name
"\s*\{", # {
"([\S\s]*?)", # content
"}\s*", # }
# feature name # name
"\s*;" # ;
]
# used for finding all lookup names.
lookup_findAll_RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"lookup\s+" # lookup
"([\w\d_.]+)" # name
"\s*{" # {
)
# used for finding the content of lookups.
# this regular expression will be compiled
# for each lookup name found.
lookupContentRE = [
"([\s;\{\}]|^)", # whitepace, ; {, } or start of line
"lookup\s+", # lookup
# lookup name # name
"\s*\{", # {
"([\S\s]*?)", # content
"}\s*", # }
# lookup name # name
"\s*;" # ;
]
# used for finding all table names.
table_findAll_RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"table\s+" # table
"([\w\d/]+)" # name
"\s*{" # {
)
# used for finding the content of tables.
# this regular expression will be compiled
# for each table name found.
tableContentRE = [
"([\s;\{\}]|^)", # whitepace, ; {, } or start of line
"table\s+", # feature
# table name # name
"\s*\{", # {
"([\S\s]*?)", # content
"}\s*", # }
# table name # name
"\s*;" # ;
]
# used for getting tag value pairs from tables.
tableTagValueRE = re.compile(
"([\w\d_.]+)" # tag
"\s+" #
"([^;]+)" # anything but ;
";" # ;
)
# used for finding all class definitions.
classDefinitionRE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"@" # @
"([\w\d_.]+)" # name
"\s*=\s*" # =
"\[" # [
"([\w\d\s\-_.@]+)" # content
"\]" # ]
"\s*;" # ;
, re.M
)
# used for getting the contents of a class definition
classContentRE = re.compile(
"([\w\d\-_.@]+)"
)
# used for finding inline classes within a sequence
sequenceInlineClassRE = re.compile(
"\[" # [
"([\w\d\s_.@]+)" # content
"\]" # ]
)
# used for finding all substitutions of type 1 and 4
subType1And4RE = re.compile(
"([\s;\{\}]|^)" # whitespace, ; {, } or start of line
"substitute|sub\s+" # sub
"([\w\d\s_.@\[\]]+)" # target
"\s+by\s+" # by
"([\w\d\s_.@\[\]]+)" # replacement
"\s*;" # ;
)
# used for finding all substitution type 3
subType3RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"substitute|sub\s+" # sub
"([\w\d\s_.@\[\]]+)" # target
"\s+from\s+" # from
"([\w\d\s_.@\[\]]+)" # replacement
"\s*;" # ;
)
# used for finding all ignore substitution type 6
ignoreSubType6RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"ignore\s+substitute|ignore\s+sub\s+" # ignore sub
"([\w\d\s_.@\[\]']+)" # preceding context, target, trailing context
"\s*;" # ;
)
# used for finding all substitution type 6
# XXX see failing unit test
subType6RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"substitute|sub\s+" # sub
"([\w\d\s_.@\[\]']+)" # preceding context, target, trailing context
"\s+by\s+" # by
"([\w\d\s_.@\[\]]+)" # replacement
"\s*;" # ;
)
subType6TargetRE = re.compile(
"(\[" # [
"[\w\d\s_.@]+" # content
"\]" # ]'
"|" # <or>
"[\w\d_.@]+)'" # content
)
subType6TargetExtractRE = re.compile(
"([\w\d_.@]*)" # glyph or class names
)
# used for finding positioning type 1
posType1RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"position|pos\s+" # pos
"([\w\d\s_.@\[\]]+)" # target
"\s+<" # <
"([-\d\s]+)" # value
"\s*>\s*;" # >;
)
# used for finding positioning type 2
posType2RE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"(enum\s+|\s*)" # enum
"(position|pos\s+)" # pos
"([-\w\d\s_.@\[\]]+)" # left, right, value
"\s*;" # ;
)
# used for finding all languagesystem
languagesystemRE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"languagesystem\s+" # languagesystem
"([\w\d]+)" # script tag
"\s+" #
"([\w\d]+)" # language tag
"\s*;" # ;
)
# used for finding all script statements
scriptRE = re.compile(
"([\s;\{\}]|^)" # whitespace, ; {, } or start of line
"script\s+" # script
"([\w\d]+)" # script tag
"\s*;" # ;
)
# used for finding all language
languageRE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"language\s+" # language
"([\w\d]+)" # language tag
"\s*" #
"([\w\d]*)" # include_dflt or exclude_dflt or nothing
"\s*;" # ;
)
# used for finding all includes
includeRE = re.compile(
"([\s;\{\}]|^)" # whitespace, ; {, } or start of line
"include\s*" # include
"\(\s*" # (
"([^\)]+)" # anything but )
"\s*\)" # )
"\s*;{0,1}" # ; which will occur zero or one times (ugh!)
)
# used for finding subtable breaks
subtableRE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"subtable\s*" # subtable
"\s*;" # ;
)
# used for finding feature references
featureReferenceRE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"feature\s+" # feature
"([\w\d]{4})" # name
"\s*;" # {
)
# used for finding lookup references
lookupReferenceRE = re.compile(
"([\s;\{\}]|^)" # whitepace, ; {, } or start of line
"lookup\s+" # lookup
"([\w\d]+)" # name
"\s*;" # {
)
# used for finding all lookup flags
lookupflagRE = re.compile(
"([\s;\{\}]|^)" # whitespace, ; {, } or start of line
"lookupflag\s+" # lookupflag
"([\w\d,\s]+)" # values
"\s*;" # ;
)
def _parseUnknown(writer, text):
text = text.strip()
## extract all table names
tableNames = table_findAll_RE.findall(text)
for precedingMark, tableName in tableNames:
# a regular expression specific to this lookup must
# be created so that nested lookups are safely handled
thisTableContentRE = list(tableContentRE)
thisTableContentRE.insert(2, tableName)
thisTableContentRE.insert(6, tableName)
thisTableContentRE = re.compile("".join(thisTableContentRE))
found = thisTableContentRE.search(text)
tableText = found.group(2)
start, end = found.span()
precedingText = text[:start]
if precedingMark:
precedingText += precedingMark
_parseUnknown(writer, precedingText)
_parseTable(writer, tableName, tableText)
text = text[end:]
## extract all feature names
featureNames = feature_findAll_RE.findall(text)
for precedingMark, featureName in featureNames:
# a regular expression specific to this lookup must
# be created so that nested lookups are safely handled
thisFeatureContentRE = list(featureContentRE)
thisFeatureContentRE.insert(2, featureName)
thisFeatureContentRE.insert(6, featureName)
thisFeatureContentRE = re.compile("".join(thisFeatureContentRE))
found = thisFeatureContentRE.search(text)
featureText = found.group(2)
start, end = found.span()
precedingText = text[:start]
if precedingMark:
precedingText += precedingMark
_parseUnknown(writer, precedingText)
_parseFeature(writer, featureName, featureText)
text = text[end:]
## extract all lookup names
lookupNames = lookup_findAll_RE.findall(text)
for precedingMark, lookupName in lookupNames:
# a regular expression specific to this lookup must
# be created so that nested lookups are safely handled
thisLookupContentRE = list(lookupContentRE)
thisLookupContentRE.insert(2, lookupName)
thisLookupContentRE.insert(6, lookupName)
thisLookupContentRE = re.compile("".join(thisLookupContentRE))
found = thisLookupContentRE.search(text)
lookupText = found.group(2)
start, end = found.span()
precedingText = text[:start]
if precedingMark:
precedingText += precedingMark
_parseUnknown(writer, precedingText)
_parseLookup(writer, lookupName, lookupText)
text = text[end:]
## extract all class data
classes = classDefinitionRE.findall(text)
for precedingMark, className, classContent in classes:
text = _executeSimpleSlice(precedingMark, text, classDefinitionRE, writer)
className = "@" + className
_parseClass(writer, className, classContent)
## extract substitutions
# sub type 1 and 4
subType1s = subType1And4RE.findall(text)
for precedingMark, target, replacement in subType1s:
text = _executeSimpleSlice(precedingMark, text, subType1And4RE, writer)
_parseSubType1And4(writer, target, replacement)
# sub type 3
subType3s = subType3RE.findall(text)
for precedingMark, target, replacement in subType3s:
text = _executeSimpleSlice(precedingMark, text, subType3RE, writer)
_parseSubType3(writer, target, replacement)
# sub type 6
subType6s = subType6RE.findall(text)
for precedingMark, target, replacement in subType6s:
text = _executeSimpleSlice(precedingMark, text, subType6RE, writer)
_parseSubType6(writer, target, replacement)
# ignore sub type 6
ignoreSubType6s = ignoreSubType6RE.findall(text)
for precedingMark, target in ignoreSubType6s:
text = _executeSimpleSlice(precedingMark, text, ignoreSubType6RE, writer)
_parseSubType6(writer, target, replacement=None, ignore=True)
## extract positions
# pos type 1
posType1s = posType1RE.findall(text)
for precedingMark, target, value in posType1s:
text = _executeSimpleSlice(precedingMark, text, posType1RE, writer)
_parsePosType1(writer, target, value)
# pos type 2
posType2s = posType2RE.findall(text)
for precedingMark, enumTag, posTag, targetAndValue in posType2s:
text = _executeSimpleSlice(precedingMark, text, posType2RE, writer)
_parsePosType2(writer, targetAndValue, needEnum=enumTag.strip())
## extract other data
# XXX look at FDK spec. sometimes a language tag of dflt will be passed
# it should be handled differently than the other tags.
# languagesystem
languagesystems = languagesystemRE.findall(text)
for precedingMark, scriptTag, languageTag in languagesystems:
text = _executeSimpleSlice(precedingMark, text, languagesystemRE, writer)
writer.languageSystem(languageTag, scriptTag)
# script
scripts = scriptRE.findall(text)
for precedingMark, scriptTag in scripts:
text = _executeSimpleSlice(precedingMark, text, scriptRE, writer)
writer.script(scriptTag)
# language
languages = languageRE.findall(text)
for precedingMark, languageTag, otherKeyword in languages:
text = _executeSimpleSlice(precedingMark, text, languageRE, writer)
if not otherKeyword or otherKeyword == "include_dflt":
writer.language(languageTag)
elif otherKeyword == "exclude_dflt":
writer.language(languageTag, includeDefault=False)
# include
inclusions = includeRE.findall(text)
for precedingMark, path in inclusions:
text = _executeSimpleSlice(precedingMark, text, includeRE, writer)
writer.include(path)
# feature reference
featureReferences = featureReferenceRE.findall(text)
for precedingMark, featureName in featureReferences:
text = _executeSimpleSlice(precedingMark, text, featureReferenceRE, writer)
writer.featureReference(featureName)
# lookup reference
lookupReferences = lookupReferenceRE.findall(text)
for precedingMark, lookupName in lookupReferences:
text = _executeSimpleSlice(precedingMark, text, lookupReferenceRE, writer)
writer.lookupReference(lookupName)
# lookupflag
lookupflags = lookupflagRE.findall(text)
for precedingMark, lookupflagValues in lookupflags:
text = _executeSimpleSlice(precedingMark, text, lookupflagRE, writer)
_parseLookupFlag(writer, lookupflagValues)
# subtable break
subtables = subtableRE.findall(text)
for precedingMark in subtables:
text = _executeSimpleSlice(precedingMark, text, subtableRE, writer)
writer.subtableBreak()
# empty instructions
terminators = terminatorRE.findall(text)
for terminator in terminators:
text = _executeSimpleSlice(None, text, terminatorRE, writer)
writer.rawText(terminator)
text = text.strip()
if text:
raise FeaToolsParserSyntaxError("Invalid Syntax: %s" % text)
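# Added note: _executeSimpleSlice consumes the first match of `regex` from
# `text`, recursively parses whatever precedes the match as unknown content
# (re-attaching the preceding mark), and returns the text remaining after the
# match so the caller can keep slicing.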
def _executeSimpleSlice(precedingMark, text, regex, writer):
first = regex.search(text)
start, end = first.span()
precedingText = text[:start]
if precedingMark:
precedingText += precedingMark
_parseUnknown(writer, precedingText)
text = text[end:]
return text
def _parseFeature(writer, name, feature):
featureWriter = writer.feature(name)
parsed = _parseUnknown(featureWriter, feature)
def _parseLookup(writer, name, lookup):
lookupWriter = writer.lookup(name)
parsed = _parseUnknown(lookupWriter, lookup)
def _parseTable(writer, name, table):
tagValueTables = ["GDEF", "head", "hhea", "OS/2", "vhea"]
# skip unknown tables
if name not in tagValueTables:
return
_parseTagValueTable(writer, name, table)
def _parseTagValueTable(writer, name, table):
valueTypes = {
"GDEF" : {
"GlyphClassDef" : str
},
"head" : {
"FontRevision" : float
},
"hhea" : {
"CaretOffset" : float,
"Ascender" : float,
"Descender" : float,
"LineGap" : float,
},
"OS/2" : {
"FSType" : int,
"Panose" : "listOfInts",
"UnicodeRange" : "listOfInts",
"CodePageRange" : "listOfInts",
"TypoAscender" : float,
"TypoDescender" : float,
"TypoLineGap" : float,
"winAscent" : float,
"winDescent" : float,
"XHeight" : float,
"CapHeight" : float,
"WeightClass" : float,
"WidthClass" : float,
"Vendor" : str
},
"vhea" : {
"VertTypoAscender" : float,
"VertTypoDescender" : float,
"VertTypoLineGap" : float
}
}
tableTypes = valueTypes[name]
parsedTagValues = []
for tag, value in tableTagValueRE.findall(table):
tag = tag.strip()
value = value.strip()
if tag not in tableTypes:
raise FeaToolsParserSyntaxError("Unknown Tag: %s" % tag)
desiredType = tableTypes[tag]
if desiredType == "listOfInts":
v = []
for line in value.splitlines():
for i in line.split():
v.append(i)
value = v
values = []
for i in value:
try:
i = int(i)
values.append(i)
except ValueError:
raise FeaToolsParserSyntaxError("Invalid Syntax: %s" % i)
value = values
elif not isinstance(value, desiredType):
try:
value = desiredType(value)
except ValueError:
raise FeaToolsParserSyntaxError("Invalid Syntax: %s" % i)
parsedTagValues.append((tag, value))
writer.table(name, parsedTagValues)
def _parseClass(writer, name, content):
content = classContentRE.findall(content)
writer.classDefinition(name, content)
def _parseSequence(sequence):
parsed = []
for content in sequenceInlineClassRE.findall(sequence):
first = sequenceInlineClassRE.search(sequence)
start, end = first.span()
precedingText = sequence[:start]
parsed.extend(_parseSequence(precedingText))
parsed.append(_parseSequence(content))
sequence = sequence[end:]
content = [i for i in sequence.split(" ") if i]
parsed.extend(content)
return parsed
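# Illustrative example (hypothetical input; added comment): _parseSequence
# flattens glyph runs but keeps inline classes nested, e.g.
# _parseSequence("f [i i.alt] l") -> ['f', ['i', 'i.alt'], 'l']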
def _parseSubType1And4(writer, target, replacement):
target = _parseSequence(target)
# replacement will always be one item.
# either a single glyph/class or a list
# representing an inline class.
replacement = _parseSequence(replacement)
replacement = replacement[0]
if len(target) == 1:
target = target[0]
writer.gsubType1(target, replacement)
else:
# target will always be a list representing a sequence.
# the list may contain strings representing a single
# glyph/class or a list representing an inline class.
writer.gsubType4(target, replacement)
def _parseSubType3(writer, target, replacement):
# target will only be one item representing
# a glyph/class name.
target = classContentRE.findall(target)
target = target[0]
replacement = classContentRE.findall(replacement)
writer.gsubType3(target, replacement)
def _parseSubType6(writer, target, replacement=None, ignore=False):
# replacement will always be one item.
# either a single glyph/class or a list
# representing an inline class.
# the only exception to this is if
# this is an ignore substitution.
# in that case, replacement will
# be None.
if not ignore:
replacement = classContentRE.findall(replacement)
if len(replacement) == 1:
replacement = replacement[0]
#
targetText = target
#
precedingContext = ""
targets = subType6TargetRE.findall(targetText)
trailingContext = ""
#
targetCount = len(targets)
counter = 1
extractedTargets = []
for target in targets:
first = subType6TargetRE.search(targetText)
start, end = first.span()
if counter == 1:
precedingContext = _parseSequence(targetText[:start])
if counter == targetCount:
trailingContext = _parseSequence(targetText[end:])
# the target could be in a form like [o o.alt]
# so it has to be broken down
target = classContentRE.findall(target)
if len(target) == 1:
target = target[0]
extractedTargets.append(target)
counter += 1
targetText = targetText[end:]
writer.gsubType6(precedingContext, extractedTargets, trailingContext, replacement)
def _parsePosType1(writer, target, value):
# target will only be one item representing
# a glyph/class name
value = tuple([float(i) for i in value.strip().split(" ")])
writer.gposType1(target, value)
def _parsePosType2(writer, targetAndValue, needEnum=False):
# the target and value will be coming
# in as single string.
target = " ".join(targetAndValue.split(" ")[:-1])
value = targetAndValue.split(" ")[-1]
# XXX this could cause a choke
value = float(value)
target = _parseSequence(target)
writer.gposType2(target, value, needEnum)
def _parseLookupFlag(writer, values):
values = values.replace(",", " ")
values = [i for i in values.split(" ") if i]
# lookupflag format B is not supported except for value 0
if len(values) == 1:
try:
v = int(values[0])
if v != 0:
raise FeaToolsParserSyntaxError("lookupflag format B is not supported for any value other than 0")
else:
writer.lookupFlag()
return
except ValueError:
pass
rightToLeft = False
ignoreBaseGlyphs = False
ignoreLigatures = False
ignoreMarks = False
possibleValues = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"]
for value in values:
if value not in possibleValues:
raise FeaToolsParserSyntaxError("Unknown lookupflag value: %s" % value)
if value == "RightToLeft":
rightToLeft = True
elif value == "IgnoreBaseGlyphs":
ignoreBaseGlyphs = True
elif value == "IgnoreLigatures":
ignoreLigatures = True
elif value == "IgnoreMarks":
ignoreMarks = True
writer.lookupFlag(rightToLeft=rightToLeft, ignoreBaseGlyphs=ignoreBaseGlyphs, ignoreLigatures=ignoreLigatures, ignoreMarks=ignoreMarks)
def parseFeatures(writer, text):
# strip the strings.
# (an alternative approach would be to escape the strings.
# the problem is that a string could contain parsable text
# that would fool the parsing algorithm.)
text = stringRE.sub("", text)
# strip the comments
text = commentRE.sub("", text)
# make sure there is a space after all ;
# since it makes the text more digestable
# for the regular expressions
text = terminatorRE.sub("; ", text)
_parseUnknown(writer, text)
|
jamesgk/feaTools
|
Lib/feaTools/parser.py
|
Python
|
mit
| 23,544
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__all__ = ["OneDSearch"]
import os
import h5py
import numpy as np
from .pipeline import Pipeline
from ._compute import compute_hypotheses
class OneDSearch(Pipeline):
cache_ext = ".h5"
query_parameters = dict(
durations=(None, True),
time_spacing=(0.05, False),
)
def get_result(self, query, parent_response):
# Parse the input parameters.
durations = np.atleast_1d(query["durations"])
dt = np.atleast_1d(query["time_spacing"])
# Get the processed light curves.
lcs = parent_response.model_light_curves
# Build the time grid.
tmin = min(map(lambda l: min(l.time), lcs))
tmax = max(map(lambda l: max(l.time), lcs))
time_grid = np.arange(tmin, tmax, dt)
# Allocate the output arrays.
dll_grid = np.zeros((len(time_grid), len(durations)))
depth_grid = np.zeros_like(dll_grid)
depth_ivar_grid = np.zeros_like(dll_grid)
# Loop over the light curves and compute the model for each one.
for lc in lcs:
# Find the times that are in this light curve.
m = (lc.time.min() <= time_grid) * (time_grid <= lc.time.max())
if not np.any(m):
continue
# Compute the grid of hypotheses.
i = np.arange(len(time_grid))[m]
imn, imx = i.min(), i.max()
compute_hypotheses(lc.search_lnlike, time_grid[imn:imx], durations,
depth_grid[imn:imx], depth_ivar_grid[imn:imx],
dll_grid[imn:imx])
return dict(
min_time_1d=tmin,
max_time_1d=tmax,
mean_time_1d=0.5 * (tmin + tmax),
dll_1d=dll_grid,
depth_1d=depth_grid,
depth_ivar_1d=depth_ivar_grid,
)
def save_to_cache(self, fn, response):
try:
os.makedirs(os.path.dirname(fn))
except os.error:
pass
with h5py.File(fn, "w") as f:
f.attrs["min_time_1d"] = response["min_time_1d"]
f.attrs["max_time_1d"] = response["max_time_1d"]
f.attrs["mean_time_1d"] = response["mean_time_1d"]
f.create_dataset("dll_1d", data=response["dll_1d"],
compression="gzip")
f.create_dataset("depth_1d", data=response["depth_1d"],
compression="gzip")
f.create_dataset("depth_ivar_1d", data=response["depth_ivar_1d"],
compression="gzip")
def load_from_cache(self, fn):
if os.path.exists(fn):
with h5py.File(fn, "r") as f:
try:
return dict(
min_time_1d=f.attrs["min_time_1d"],
max_time_1d=f.attrs["max_time_1d"],
mean_time_1d=f.attrs["mean_time_1d"],
dll_1d=f["dll_1d"][...],
depth_1d=f["depth_1d"][...],
depth_ivar_1d=f["depth_ivar_1d"][...],
)
except KeyError:
pass
return None
|
dfm/ketu
|
ketu/one_d_search.py
|
Python
|
mit
| 3,255
|
import os
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.db import connection
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from .models import TwitterUser, TwitterUserItem
from .utils import process_info, xls_tweets_workbook, csv_tweets_writer
def _paginate(request, paginator):
page = request.GET.get('page', 1)
try:
items = paginator.page(page)
except PageNotAnInteger:
items = paginator.page(1)
except EmptyPage:
items = paginator.page(paginator.num_pages)
return page, items
@login_required
def home(request):
qs_users = TwitterUser.objects.all()
qs_users_alpha = qs_users.order_by('?')
qs_items = TwitterUserItem.objects.order_by('-date_published')
try:
cursor = connection.cursor()
cursor.execute("""
SELECT DATE_TRUNC('day', date_published) AS day,
COUNT(*) AS item_count
FROM ui_twitteruseritem
WHERE date_published > NOW() - INTERVAL '1 month'
GROUP BY 1
ORDER BY day
LIMIT 31 OFFSET 1;
""")
daily_counts = [[row[0].strftime('%Y-%m-%d'), int(row[1])]
for row in cursor.fetchall()]
# Workaround for known "slow count(*)" issue
cursor.execute("""
SELECT reltuples FROM pg_class WHERE relname='ui_twitteruseritem'
""")
item_count = int(cursor.fetchone()[0])
except:
daily_counts = []
item_count = 0
return render(request, 'home.html', {
'title': 'home',
'users': qs_users,
'users_alpha': qs_users_alpha[:25],
'items': qs_items[:10],
'item_count': item_count,
'daily_counts': daily_counts,
})
@login_required
def search(request):
q = request.GET.get('q', '')
title = ''
# Default to an empty queryset so an empty query does not raise NameError below
qs_users = TwitterUser.objects.none()
if q:
qs_users = TwitterUser.objects.filter(name__icontains=q)
qs_users = qs_users.extra(select={'lower_name': 'lower(name)'})
qs_users = qs_users.order_by('lower_name')
title = 'search: "%s"' % q
return render(request, 'search.html', {
'title': title,
'users': qs_users,
'q': q,
})
@login_required
def tweets(request):
qs_tweets = TwitterUserItem.objects.order_by('-date_published')
paginator = Paginator(qs_tweets, 50)
page, tweets = _paginate(request, paginator)
return render(request, 'tweets.html', {
'title': 'all tweets, chronologically',
'tweets': tweets,
'paginator': paginator,
'page': page,
})
@login_required
def users_alpha(request):
qs_users = TwitterUser.objects.all()
qs_users = qs_users.extra(select={'lower_name': 'lower(name)'})
qs_users = qs_users.order_by('lower_name')
paginator = Paginator(qs_users, 25)
page, users = _paginate(request, paginator)
return render(request, 'users_alpha.html', {
'title': 'all users, alphabetically',
'users': users,
'paginator': paginator,
'page': page,
})
@login_required
def twitter_user(request, name=''):
user = get_object_or_404(TwitterUser, name=name)
qs_tweets = user.items.order_by('-date_published')
# grab a slightly older tweet to use for bio info
if qs_tweets.count() > 20:
recent_tweet = qs_tweets[20]
elif qs_tweets.count() > 0:
recent_tweet = qs_tweets[0]
else:
recent_tweet = None
paginator = Paginator(qs_tweets, 50)
page, tweets = _paginate(request, paginator)
# fetch 90 days' worth of counts
try:
cursor = connection.cursor()
cursor.execute("""
SELECT DATE_TRUNC('day', date_published) AS day,
COUNT(*) AS item_count
FROM ui_twitteruseritem
WHERE twitter_user_id = %s
AND date_published > NOW() - INTERVAL '3 months'
GROUP BY 1
ORDER BY day
LIMIT 91 OFFSET 1;
""" % (user.id))
daily_counts = [[row[0].strftime('%Y-%m-%d'), int(row[1])]
for row in cursor.fetchall()]
except:
daily_counts = []
return render(request, 'twitter_user.html', {
'title': 'twitter user: %s' % name,
'twitter_user': user,
'qs_tweets': qs_tweets,
'tweets': tweets,
'recent_tweet': recent_tweet,
'daily_counts': daily_counts,
'paginator': paginator,
'page': page,
})
@login_required
def twitter_user_csv(request, name=''):
user = get_object_or_404(TwitterUser, name=name)
qs_tweets = user.items.order_by('-date_published')
csvwriter = csv_tweets_writer(qs_tweets, TwitterUserItem.csv_headers)
response = StreamingHttpResponse(csvwriter.out(), content_type='text/csv')
response['Content-Disposition'] = \
'attachment; filename="%s.csv"' % name
return response
@login_required
def twitter_user_xls(request, name=''):
user = get_object_or_404(TwitterUser, name=name)
qs_tweets = user.items.order_by('-date_published')
tworkbook = xls_tweets_workbook(qs_tweets, TwitterUserItem.csv_headers)
response = HttpResponse(content_type='text/ms-excel')
response['Content-Disposition'] = \
'attachment; filename="%s.xlsx"' % name
tworkbook.save(response)
return response
@login_required
def twitter_item(request, id=0):
item = get_object_or_404(TwitterUserItem, id=int(id))
return HttpResponse(item.item_json, content_type='application/json')
@login_required
def twitter_item_links(request, id=0):
item = get_object_or_404(TwitterUserItem, id=int(id))
unshortened = [item.unshorten(l) for l in item.links]
return render(request, 'twitter_item_links.html', {
'item': item,
'unshortened': unshortened,
})
def logout(request):
auth.logout(request)
return redirect(reverse('home'))
# redirect to a "no superuser" page when the user is not a superuser
@user_passes_test(lambda u: u.is_superuser, login_url='django_no_superuser')
def status(request):
if os.path.exists(settings.SUPERVISOR_UNIX_SOCKET_FILE):
proc_status = process_info()
return render(request, 'status.html', {
'list': proc_status,
})
else:
return render(request, 'status_not_found.html')
|
gwu-libraries/social-feed-manager
|
sfm/ui/views.py
|
Python
|
mit
| 6,575
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HNew1orMoreNamePart2_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HNew1orMoreNamePart2_CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HNew1orMoreNamePart2_CompleteLHS, self).__init__(name='HNew1orMoreNamePart2_CompleteLHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([(2, 0), (1, 2)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__UMLRT2Kiltera_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = pickle.loads("""V#===============================================================================\u000a# This code is executed after the nodes in the LHS have been matched.\u000a# You can access a matched node labelled n by: PreNode('n').\u000a# To access attribute x of node n, use: PreNode('n')['x'].\u000a# The given constraint must evaluate to a boolean expression:\u000a# returning True enables the rule to be applied,\u000a# returning False forbids the rule from being applied.\u000a#===============================================================================\u000a\u000areturn True\u000a
p1
.""")
self["name"] = """"""
self["GUID__"] = UUID('08273256-ac6e-428d-a4c5-83195f52a932')
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """2"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__PythonRef'
p2
a.""")
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__Name"""
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = UUID('b1618007-e08e-4129-aaa6-2aec21f53291')
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_pivotIn__"] = """element1"""
self.vs[1]["MT_label__"] = """1"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__New"""
self.vs[1]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["GUID__"] = UUID('377ea6a7-b3d4-4515-893b-ec0ac5cec603')
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__directLink_T"""
self.vs[2]["GUID__"] = UUID('a09e55b2-224a-4d53-94c6-5ed4d20af67d')
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/Properties/Multiplicity/Himesis/HNew1orMoreNamePart2_CompleteLHS.py
|
Python
|
mit
| 12,682
|
from PyQt4 import QtCore,QtGui
import re
# Initialize Qt resources from file resources.py
from sessionClasses import *
class DataSource():
"""
    Manages the read and write operations on the Qt settings used for user management.
"""
def __init__(self,company,application):
"""
        @param string: company name-space
        @param string: application name-space
"""
self.ds=QtCore.QSettings(company, application)
def getUserCount(self):
"""ritorna il valore attuale di UserCounter
@return: int
"""
c= self.ds.value("UserCount", 0).toInt()[0]
return c
def registerUser(self,user, label):
"""
        Registers a user in the system settings under the label passed as parameter.
        @param extendedUser:
        @param string: user identification label
"""
self.ds.setValue(label, user)
def setUserCounter(self,c):
"""setta il valore di UserCount
@param int: nuovo valore di userCount
"""
self.ds.setValue("userCount",c)
def setDbCounter(self,c):
"""setta il valore di DbCounter
@param int: nuovo valore di userCount
"""
self.ds.setValue("dbCounter",c)
def getDbCount(self):
"""ritorna il valore attuale di dbCounter
@return: int
"""
c= self.ds.value("dbCounter",0).toInt()[0]
return c
def parser(self,txt,pattern):
""" ritorna la lista di tutte le occorrenze del pattern nella stringa passata
@param string: stringa da esaminare
@param string: pattern da applicare
@return: [string]
"""
match=re.compile(pattern)
return match.findall(txt)
def getUser(self,uname):
"""ritorna lo user registrato con uname
@param string: username come appare nella lista di QSettings
@return User
"""
return self.ds.value(uname).toPyObject()
def getUserList(self):
""" ritorna la lista degli utenti registrati
@return [User]
"""
self.chiavi=self.ds.allKeys()
        userNames=[ str(i) for i in self.parser(self.chiavi.join("|"), r"user\d+")]  # keys look like "user0", "user1", ...
users=[]
for i in userNames:
users.append(self.getUser(i))
return users
def getDbList(self):
""" ritorna la lista degli utenti registrati
@return [extendedDb]
"""
self.chiavi=self.ds.allKeys()
userNames=[ str(i) for i in self.parser(self.chiavi.join("|"), "db\d+")]
dbs=[]
for i in userNames:
dbs.append(self.getUser(i))
return dbs
def registerDb(self,name,db):
"""registra un db nel sistema
@param string: nome del db nel sistema
@param extendedDb:
"""
self.chiavi=self.ds.allKeys()
Id=self.ds.value("QString", 0).toInt()[0]
#registro il db
self.ds.setValue(name,db)
# incremento il contatore
self.ds.setValue("dbCounter",Id+1)
print "dbCounter",Id
|
arpho/mmasgis5
|
mmasgis/DataSource.py
|
Python
|
mit
| 2,673
|
import re
from .default import DefaultParser
class NoseParser(DefaultParser):
name = "nose"
def command_matches(self, command):
return "nosetests" in command or "-m nose" in command
def num_passed(self, result):
return self.num_total(result) - self.num_failed(result)
def num_total(self, result):
# Ran 2 test(s) in nnn seconds.
m = re.findall("Ran (\d+) tests?", result.output)
return int(m[-1])
def num_failed(self, result):
# If failed, you'll see one of
# FAILED (failures=1)
# FAILED (failures=1, errors=1)
failed = 0
m = re.findall("FAILED \(.*failures=(\d+)", result.output)
if len(m) > 0:
failed += int(m[-1])
failed += self.num_error(result)
return failed
def num_error(self, result):
# If errored, you'll see one of
# FAILED (failures=1, errors=1)
# FAILED (errors=1)
m = re.findall("FAILED \(.*errors=(\d+)", result.output)
if len(m) > 0:
return int(m[-1])
return 0
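# Worked example of the regexes above (illustrative, not part of polytester):
# given output "Ran 3 tests in 0.004s" followed by "FAILED (failures=1, errors=1)",
# num_total() -> 3, num_error() -> 1, num_failed() -> 1 failure + 1 error = 2,
# and num_passed() -> 3 - 2 = 1.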
|
skoczen/polytester
|
polytester/parsers/nose.py
|
Python
|
mit
| 1,085
|
"""
Problem 2c.
Write a while loop that sums the values 1 through end, inclusive.
end is a variable that we define for you. So, for example, if we
define end to be 6, your code should print out the result:
21
which is 1 + 2 + 3 + 4 + 5 + 6.
"""
i = 1
sm = 0
while (i <= end):
sm += i
i += 1
print sm
|
CptDemocracy/Python
|
MITx-6.00.1x-EDX-Introduction-to-Computer-Science/Week-2/Lecture-3/problem2c.py
|
Python
|
mit
| 318
|
"""
Test utility functions
"""
from unittest.mock import Mock
import csv
import json
import requests
import pytest
from mailchimp3 import MailChimp
from mcwriter.utils import (serialize_dotted_path_dict,
serialize_lists_input,
serialize_members_input,
serialize_tags_input,
prepare_batch_data_lists,
prepare_batch_data_add_members,
prepare_batch_data_delete_members,
_setup_client,
_verify_credentials,
batch_still_pending,
wait_for_batch_to_finish,
write_batches_to_csv)
from mcwriter.exceptions import ConfigError, MissingFieldError
@pytest.fixture
def finished_batch_response():
return {
'_links': [{'href': 'https://us15.api.mailchimp.com/3.0/batches',
'method': 'GET',
'rel': 'parent',
'schema': 'https://us15.api.mailchimp.com/schema/3.0/CollectionLinks/Batches.json',
'targetSchema': 'https://us15.api.mailchimp.com/schema/3.0/Definitions/Batches/CollectionResponse.json'},
{'href': 'https://us15.api.mailchimp.com/3.0/batches/a3bb03520b',
'method': 'GET',
'rel': 'self',
'targetSchema': 'https://us15.api.mailchimp.com/schema/3.0/Definitions/Batches/Response.json'},
{'href': 'https://us15.api.mailchimp.com/3.0/batches/a3bb03520b',
'method': 'DELETE',
'rel': 'delete'}],
'completed_at': '2017-04-21T11:08:22+00:00',
'errored_operations': 1,
'finished_operations': 2,
'id': 'a3bb03520b',
'response_body_url': 'https://mailchimp-api-batch.s3.amazonaws.com/a3bb03520b-response.tar.gz?AWSAccessKeyId=AKIAJWOH5BECJQZIEWNQ&Expires=1492773508&Signature=Z2WnBzKxILiuOqW%2FGHa66IqRhM8%3D',
'status': 'finished',
'submitted_at': '2017-04-21T11:08:15+00:00',
'total_operations': 3}
@pytest.fixture
def pending_batch_response():
return {
'_links': [{'href': 'https://us15.api.mailchimp.com/3.0/batches',
'method': 'GET',
'rel': 'parent',
'schema': 'https://us15.api.mailchimp.com/schema/3.0/CollectionLinks/Batches.json',
'targetSchema': 'https://us15.api.mailchimp.com/schema/3.0/Definitions/Batches/CollectionResponse.json'},
{'href': 'https://us15.api.mailchimp.com/3.0/batches/a3bb03520b',
'method': 'GET',
'rel': 'self',
'targetSchema': 'https://us15.api.mailchimp.com/schema/3.0/Definitions/Batches/Response.json'},
{'href': 'https://us15.api.mailchimp.com/3.0/batches/a3bb03520b',
'method': 'DELETE',
'rel': 'delete'}],
'id': 'a3bb03520b',
'response_body_url': 'https://mailchimp-api-batch.s3.amazonaws.com/a3bb03520b-response.tar.gz?AWSAccessKeyId=AKIAJWOH5BECJQZIEWNQ&Expires=1492773508&Signature=Z2WnBzKxILiuOqW%2FGHa66IqRhM8%3D',
'status': 'pending',
'submitted_at': '2017-04-21T11:08:15+00:00',
'total_operations': 3}
def test_serializing_nested_path():
flat = {'name': 'Robin',
'contact__address': 'Foobar',
'contact__country': 'Czechia',
'confirm': True}
expected = {
'name': 'Robin',
'contact': {
'address': 'Foobar',
'country': 'Czechia'},
'confirm': True}
serialized = serialize_dotted_path_dict(flat)
assert expected == serialized
def test_serializing_2_levels_nested_path():
flat = {'name': 'Robin',
'merge_fields__ADDRESS__zip': '123',
'merge_fields__ADDRESS__country': 'Czechia',
'merge_fields__CONFIRM': True}
expected = {
'name': 'Robin',
'merge_fields': {
"ADDRESS": {
'zip': '123',
'country': 'Czechia'},
'CONFIRM': True}
}
serialized = serialize_dotted_path_dict(flat)
assert expected == serialized
def test_serializing_new_lists_input_csv(new_lists_csv):
# Fake inputs
serialized = serialize_lists_input(new_lists_csv.name)
expected = [{'campaign_defaults': {'from_email': 'dark_mage001@email.com',
'from_name': 'Albus Dumlbedore',
'language': 'English',
'subject': 'Welcome, young wizard!'},
'contact': {'address1': '4 Privet Drive',
'address2': '',
'city': 'Wizardshire',
'company': 'Magical company ltd.',
'country': 'Wonderland',
'phone': '',
'state': 'Wonderland',
'zip': '66678'},
'email_type_option': True,
'name': 'Wizards of the world',
'notify_on_subscribe': '',
'notify_on_unsubscribe': '',
'permission_reminder': 'You are in this list, because you just turned 11 and have magical abilities',
'use_archive_bar': False,
'custom_id': 'custom_list1',
'visibility': 'prv'},
# Same as the first one, but all 'a' are switched to 'xx'
{'campaign_defaults': {'from_email': 'dxxrk_mxxge001@emxxil.com',
'from_name': 'XXlbus Dumlbedore',
'language': 'English',
'subject': 'Welcome, young wizxxrd!'},
'contact': {'address1': '4 Privet Drive',
'address2': '',
'city': 'Wizxxrdshire',
'company': 'Mxxgicxxl compxxny ltd.',
'country': 'Wonderlxxnd',
'phone': '',
'state': 'Wonderlxxnd',
'zip': '66678'},
'email_type_option': True,
'name': 'Wizxxrds of the world',
'notify_on_subscribe': '',
'notify_on_unsubscribe': '',
'permission_reminder': 'You xxre in this list, becxxuse you just turned 11 xxnd hxxve mxxgicxxl xxbilities',
'use_archive_bar': False,
'custom_id': 'custom_list2',
'visibility': 'prv'},
]
assert expected[0] == serialized[0]
assert expected[1] == serialized[1]
def test_preparing_batch_data():
data = [{'name':'bar', 'baz':'qux'},
{'name':'bar2', 'baz': 'quxx'}]
batch_data = prepare_batch_data_lists(data)
expected = {'operations': [
{'method': 'POST',
'path': '/lists',
'operation_id': 'bar',
'body': json.dumps({'name':'bar', 'baz':'qux'})},
{'method': 'POST',
'path': '/lists',
'operation_id': 'bar2',
'body': json.dumps({'name':'bar2', 'baz': 'quxx'})}
]}
assert batch_data == expected
def test_preparing_batch_data_for_delete():
serialized_data = [{'list_id': 'foo', 'subscriber_hash': 'abc', 'email_address': 'me@ex.co'}]
expected = {
'operations': [
{'method': 'DELETE',
'path': '/lists/foo/members/abc',
'operation_id': 'me@ex.co'}
]
}
prepared = prepare_batch_data_delete_members(serialized_data)
assert prepared == expected
def test_preparing_batch_data_for_update():
    serialized_data = [
        {'list_id': 'foo',
         'subscriber_hash': 'bar',
         'email_address': 'xyz@qux.com',
         'status_if_new': 'pending'}
    ]
    # Expected structure mirrors test_preparing_batch_members_data_adding_members below.
    expected = {'operations': [
        {'method': 'PUT',
         'path': '/lists/foo/members/bar',
         'operation_id': 'bar',
         'body': json.dumps({'email_address': 'xyz@qux.com', 'status_if_new': 'pending'})}
    ]}
    batch_data = prepare_batch_data_add_members(serialized_data)
    assert batch_data == expected
def test_serializing_members_input(new_members_csv):
serialized = serialize_members_input(new_members_csv.name, action='add_or_update', chunk_size=1)
first_chunk = next(serialized)
expected = [
{'email_address': 'robin@keboola.com',
'list_id': '12345', # comes from the csvfile
'vip': True,
'interests' : {
'1234abc': True,
'abc1234': True},
'status': 'subscribed',
'status_if_new': 'subscribed',
'email_type': True,
'subscriber_hash': 'a2a362ca5ce6dc7e069b6f7323342079', #md5 hash
'merge_fields': {'*|FNAME|*': 'Robin'},
},
{'email_address': 'foo@bar.com',
'list_id': '12345',
'vip': False,
'interests' : {
'1234abc': True,
'abc1234': False},
'status': 'pending',
'status_if_new': 'subscribed',
'subscriber_hash': 'f3ada405ce890b6f8204094deb12d8a8', #md5 hash
'email_type': False,
'merge_fields': {'*|FNAME|*': ''},
}
]
assert first_chunk[0] == expected[0]
second_chunk = next(serialized)
assert second_chunk[0] == expected[1]
with pytest.raises(StopIteration):
next(serialized)
def test_serializing_members_input_linked_to_lists(new_members_csv_linked_to_lists,
created_lists):
serialized = serialize_members_input(
new_members_csv_linked_to_lists.name,
action='add_or_update',
created_lists=created_lists)
first_chunk = next(serialized)
expected = [
{'email_address': 'robin@keboola.com',
'list_id': 'mailchimp_list_id', # the id comes from the mapping returned by create_lists()
'vip': True,
'interests' : {
'1234abc': True,
'abc1234': True},
'status': 'subscribed',
'status_if_new': 'subscribed',
'email_type': True,
'subscriber_hash': 'a2a362ca5ce6dc7e069b6f7323342079', #md5 hash
'merge_fields': {'*|FNAME|*': 'Robin'},
},
{'email_address': 'foo@bar.com',
'list_id': 'mailchimp_list_id',
'vip': False,
'interests' : {
'1234abc': True,
'abc1234': False},
'status': 'pending',
'status_if_new': 'subscribed',
'subscriber_hash': 'f3ada405ce890b6f8204094deb12d8a8', #md5 hash
'email_type': False,
'merge_fields': {'*|FNAME|*': ''},
}
]
assert first_chunk == expected
def test_preparing_batch_members_data_adding_members():
data = [{'foo':'bar', 'email_address': 'foo@barbar.cz',
'list_id':'ab1234', 'subscriber_hash': 'foobar',
'status_if_new': 'subscribed'},
{'foo':'bar2', 'email_address': 'foo@bar.cz',
'list_id':'ab1234', 'subscriber_hash': 'foobar',
'status': 'subscribed'}]
batch_data = prepare_batch_data_add_members(data)
expected = {'operations': [
{'method': 'PUT',
'path': '/lists/ab1234/members/foobar',
'operation_id': 'foobar',
'body': json.dumps({'foo':'bar',
'email_address': 'foo@barbar.cz',
'status_if_new': 'subscribed'})},
{'method': 'PUT',
'path': '/lists/ab1234/members/foobar',
'operation_id': 'foobar',
'body': json.dumps({'foo':'bar2', 'email_address': 'foo@bar.cz', 'status': 'subscribed'})}
]}
assert batch_data == expected
def test_setting_up_client_works(monkeypatch):
params = {'#apikey': 'secret'}
client = _setup_client(params, enabled=False)
assert isinstance(client, MailChimp)
def test_setting_up_client_fails_on_nonpresent_apikey(monkeypatch):
params = {}
with pytest.raises(MissingFieldError):
client = _setup_client(params, enabled=False)
def test_verifying_credentials_fails_on_wrong_apikey(client, monkeypatch):
def raise_http_error():
err = requests.HTTPError("WRONG!")
class Resp:
status_code = 401
err.response = Resp()
raise err
monkeypatch.setattr(client.api_root, 'get', raise_http_error)
with pytest.raises(ConfigError) as excinfo:
_verify_credentials(client)
excinfo.match('Invalid credentials')
def test_verifying_credentials_wrong_apikey_ConnectionError(client, monkeypatch):
def raise_ConnectionError():
raise requests.ConnectionError("WRONG!")
monkeypatch.setattr(client.api_root, 'get', raise_ConnectionError)
with pytest.raises(ConfigError) as excinfo:
_verify_credentials(client)
excinfo.match('Invalid credentials')
def test_checking_status_of_batch_operation(pending_batch_response,
finished_batch_response):
assert batch_still_pending(pending_batch_response) is True
assert batch_still_pending(finished_batch_response) is False
def test_waiting_for_batch_op_to_finish(pending_batch_response,
finished_batch_response,
monkeypatch):
fake_client_get_batch_id = Mock(
side_effect=[pending_batch_response, finished_batch_response])
monkeypatch.setattr('mailchimp3.mailchimpclient.MailChimpClient._get',
fake_client_get_batch_id)
client = MailChimp('foo', 'bar')
batch_id = 'a3bb03520b'
results = wait_for_batch_to_finish(client, batch_id, api_delay=0.1)
assert results == finished_batch_response
def test_parsing_tags_table(add_tags_csv):
serialized = serialize_tags_input(add_tags_csv.strpath)
expected = [{
'list_id': 'abc0123',
'name': 'My first tag',
'type': 'text',
'tag': "MYFIRST",
'required': False,
'options': {'size': 255}
}]
assert serialized == expected
def test_parsing_tags_table_with_custom_id(add_tags_csv_custom_id):
created_lists = {'wizards': 'ab0123'}
serialized = serialize_tags_input(add_tags_csv_custom_id.strpath, created_lists)
expected = [{
'list_id': 'ab0123',
'name': 'My first tag',
'type': 'text',
'tag': "MYFIRST",
'required': False,
'options': {'size': 255}
}]
assert serialized == expected
def test_writing_batches_csv(tmpdir):
outpath = tmpdir.join("out.csv")
batches = [{'_links': [{'href': 'https://us16.api.mailchimp.com/3.0/batches',
'rel': 'delete'}],
'completed_at': '2017-11-01T10:05:15+00:00',
'errored_operations': 13,
'finished_operations': 500,
'id': '71efba6182',
'response_body_url': 'https://mai',
'status': 'finished',
'submitted_at': '2017-11-01T10:00:49+00:00',
'total_operations': 500},
{'_links': [{'href': 'https://us16.api.mailchimp.com/3.0/batches',
'rel': 'delete'}],
'completed_at': '2017-11-01T10:05:15+00:00',
'errored_operations': 13,
'finished_operations': 500,
'id': '71efba6182',
'response_body_url': 'https://mai',
'status': 'finished',
'submitted_at': '2017-11-01T10:00:49+00:00',
'total_operations': 500}
]
outpath = write_batches_to_csv(batches, outpath.strpath)
with open(outpath, 'r') as f:
assert len(f.readlines()) == 3
with open(outpath, 'r') as f:
reader = csv.DictReader(f)
assert '_links' not in reader.fieldnames
assert 'id' in reader.fieldnames
|
pocin/kbc-mailchimp-writer
|
tests/test_utils.py
|
Python
|
mit
| 15,874
|
from abc import abstractmethod
from pgmpy.extern.six.moves import reduce
class BaseFactor(object):
"""
Base class for Factors. Any Factor implementation should inherit this class.
"""
def __init__(self, *args, **kwargs):
pass
@abstractmethod
def is_valid_cpd(self):
pass
def factor_product(*args):
"""
Returns factor product over `args`.
Parameters
----------
args: `BaseFactor` instances.
factors to be multiplied
Returns
-------
BaseFactor: `BaseFactor` representing factor product over all the `BaseFactor` instances in args.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factor_product
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x4', 'x1'], [2, 2, 2], range(8))
>>> phi = factor_product(phi1, phi2)
>>> phi.variables
['x1', 'x2', 'x3', 'x4']
>>> phi.cardinality
array([2, 3, 2, 2])
>>> phi.values
array([[[[ 0, 0],
[ 4, 6]],
[[ 0, 4],
[12, 18]],
[[ 0, 8],
[20, 30]]],
[[[ 6, 18],
[35, 49]],
[[ 8, 24],
[45, 63]],
[[10, 30],
[55, 77]]]])
"""
if not all(isinstance(phi, BaseFactor) for phi in args):
raise TypeError("Arguments must be factors")
# Check if all of the arguments are of the same type
elif len(set(map(type, args))) != 1:
raise NotImplementedError("All the args are expected to ",
"be instances of the same factor class.")
return reduce(lambda phi1, phi2: phi1 * phi2, args)
def factor_divide(phi1, phi2):
"""
Returns `DiscreteFactor` representing `phi1 / phi2`.
Parameters
----------
phi1: Factor
The Dividend.
phi2: Factor
The Divisor.
Returns
-------
DiscreteFactor: `DiscreteFactor` representing factor division `phi1 / phi2`.
Examples
--------
>>> from pgmpy.factors.discrete import DiscreteFactor
>>> from pgmpy.factors import factor_product
>>> phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
>>> phi2 = DiscreteFactor(['x3', 'x1'], [2, 2], range(1, 5))
>>> phi = factor_divide(phi1, phi2)
>>> phi.variables
['x1', 'x2', 'x3']
>>> phi.cardinality
array([2, 3, 2])
>>> phi.values
array([[[ 0. , 0.33333333],
[ 2. , 1. ],
[ 4. , 1.66666667]],
[[ 3. , 1.75 ],
[ 4. , 2.25 ],
[ 5. , 2.75 ]]])
"""
if not isinstance(phi1, BaseFactor) or not isinstance(phi2, BaseFactor):
raise TypeError("phi1 and phi2 should be factors instances")
# Check if all of the arguments are of the same type
elif type(phi1) != type(phi2):
raise NotImplementedError("All the args are expected to be instances",
"of the same factor class.")
return phi1.divide(phi2, inplace=False)
|
khalibartan/pgmpy
|
pgmpy/factors/base.py
|
Python
|
mit
| 3,183
|
import sys
import PConstant
class DummySchema(object):
def __init__(self):
self.request_body = {}
    def schema(self):
return self.request_body
class TrialSchema(object):
def __init__(self):
self.request_body = {
"settings": {
"analysis": {
"filter": {
"english_stop": {
"type": "stop",
"stopwords": "_english_"
},
"english_keywords": {
"type": "keyword_marker", "keywords": ["bill"]
},
"english_stemmer": {
"type": "stemmer",
"language": "english"
},
"english_possessive_stemmer": {
"type": "stemmer",
"language": "possessive_english"
}
},
"analyzer": {
"english": {
"tokenizer": "standard",
"filter": [
"english_possessive_stemmer",
"lowercase",
"english_stop",
"english_keywords",
"english_stemmer"
]
}
}
}
},
"mappings" : {
"testarticles" : {
"properties" : {
"track_url": {
"type": "string",
"index": "not_analyzed"
},
"articlelink": {
"type": "string",
"index": "not_analyzed"
},
"versionNo": {
"type": "integer",
"index": "no"
},
"keywords": {
"type": "string"
},
"childCategory": {
"type": "string"
},
"contributor": {
"type": "string"
},
"section" : {
"type": "string"
},
"subheading" : {
"type": "string"
},
"source" : {
"type": "string",
"index": "not_analyzed"
},
"issueDate" : {
"type": "date"
},
"body": {
"type": "string",
"analyzer": "english"
},
"parentCategory": {
"type": "string"
},
"images" : {
"type": "string"
},
"publishDate" : {
"type": "date"
},
"slug" : {
"type": "string"
},
"publisher": {
"type": "string",
"index": "not_analyzed"
},
"channelslno": {
"type": "string",
"index": "not_analyzed"
},
"articleCreated": {
"type": "date"
},
"url": {
"type": "string"
},
"articleid": {
"type": "string",
"index": "not_analyzed"
},
"articleTitle": {
"type": "string"
}
}
}
}
}
def schema(self):
return self.request_body
class ESDatabaseMetaStore(object):
def get_request_body(self, operation):
schemaobject = DummySchema()
if operation == PConstant.PConstant.TRIALDATA_SCHEMA.value:
schemaobject = TrialSchema()
return schemaobject.schema()
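# Illustrative usage sketch (assumes PConstant.PConstant.TRIALDATA_SCHEMA is the enum
# member defined in the sibling PConstant module):
# store = ESDatabaseMetaStore()
# body = store.get_request_body(PConstant.PConstant.TRIALDATA_SCHEMA.value)
# # ``body`` is TrialSchema's request_body; any other value yields DummySchema's empty dict.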
|
somilasthana/esearch
|
ESDatabaseMetaStore.py
|
Python
|
mit
| 4,873
|
# KidsCanCode - Game Development with Pygame video series
# Shmup game - part 3
# Video link: https://www.youtube.com/watch?v=33g62PpFwsE
# Collisions and bullets
import pygame
import random
WIDTH = 480
HEIGHT = 600
FPS = 60
# define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
# initialize pygame and create window
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Shmup!")
clock = pygame.time.Clock()
class Player(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((50, 40))
self.image.fill(GREEN)
self.rect = self.image.get_rect()
self.rect.centerx = WIDTH / 2
self.rect.bottom = HEIGHT - 10
self.speedx = 0
def update(self):
self.speedx = 0
keystate = pygame.key.get_pressed()
if keystate[pygame.K_LEFT]:
self.speedx = -8
if keystate[pygame.K_RIGHT]:
self.speedx = 8
self.rect.x += self.speedx
if self.rect.right > WIDTH:
self.rect.right = WIDTH
if self.rect.left < 0:
self.rect.left = 0
def shoot(self):
bullet = Bullet(self.rect.centerx, self.rect.top)
all_sprites.add(bullet)
bullets.add(bullet)
class Mob(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((30, 40))
self.image.fill(RED)
self.rect = self.image.get_rect()
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-100, -40)
self.speedy = random.randrange(1, 8)
self.speedx = random.randrange(-3, 3)
def update(self):
self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > HEIGHT + 10 or self.rect.left < -25 or self.rect.right > WIDTH + 20:
self.rect.x = random.randrange(WIDTH - self.rect.width)
self.rect.y = random.randrange(-100, -40)
self.speedy = random.randrange(1, 8)
class Bullet(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.Surface((10, 20))
self.image.fill(YELLOW)
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -10
def update(self):
self.rect.y += self.speedy
# kill if it moves off the top of the screen
if self.rect.bottom < 0:
self.kill()
all_sprites = pygame.sprite.Group()
mobs = pygame.sprite.Group()
bullets = pygame.sprite.Group()
player = Player()
all_sprites.add(player)
for i in range(8):
m = Mob()
all_sprites.add(m)
mobs.add(m)
# Game loop
running = True
while running:
# keep loop running at the right speed
clock.tick(FPS)
# Process input (events)
for event in pygame.event.get():
# check for closing window
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
player.shoot()
# Update
all_sprites.update()
# check to see if a bullet hit a mob
hits = pygame.sprite.groupcollide(mobs, bullets, True, True)
for hit in hits:
m = Mob()
all_sprites.add(m)
mobs.add(m)
# check to see if a mob hit the player
hits = pygame.sprite.spritecollide(player, mobs, False)
if hits:
running = False
# Draw / render
screen.fill(BLACK)
all_sprites.draw(screen)
# *after* drawing everything, flip the display
pygame.display.flip()
pygame.quit()
|
kidscancode/gamedev
|
tutorials/shmup/shmup-3.py
|
Python
|
mit
| 3,817
|
class Response(dict):
def __init__(self, *args, **kwargs):
super(Response, self).__init__(*args, **kwargs)
for (key, value) in self.items():
self[key] = self.convert_value(value)
def __getattr__(self, name):
return self.__getitem__(name)
def __getitem__(self, item):
try:
return super(Response, self).__getitem__(item)
except KeyError as error:
raise AttributeError(*error.args)
def __setattr__(self, name, value):
if isinstance(value, dict):
return self.__setitem__(name, Response(value))
else:
return self.__setitem__(name, value)
def convert_value(self, value, duping=False):
if isinstance(value, Response):
return value.copy()
elif isinstance(value, dict):
return Response(value)
elif isinstance(value, list):
return [self.convert_value(v) for v in value]
else:
return value
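if __name__ == "__main__":
    # Minimal usage sketch (not part of the accepton test suite): nested dicts are
    # converted to Response objects recursively, so keys are reachable both as
    # items and as attributes.
    resp = Response({"status": "ok", "amount": {"currency": "usd", "cents": 100}})
    assert resp["status"] == "ok"
    assert resp.status == "ok"
    assert resp.amount.currency == "usd"  # the nested dict became a Response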
|
accepton/accepton-python
|
accepton/response.py
|
Python
|
mit
| 1,000
|
from sys import argv
script, filename = argv
print "We're going to erase %r." % filename
print "If you dont want that, hit CTRL-C(^C)."
print "If you do want that, hit RETURN."
raw_input("?")
print"Opening the file..."
target = open(filename,'w')
print "Truncating the file. Goodbye!"
target.truncate()
print "Now I'm going to ask you for three lines."
line1 = raw_input("line 1:")
line2 = raw_input("line 2:")
line3 = raw_input("line 3:")
print "I'm goint to write these to the file."
target.write(line1)
target.write("\n")
target.write(line2)
target.write("\n")
target.write(line3)
target.write("\n")
print "And finally,we close it."
target.close()
|
AisakaTiger/Learn-Python-The-Hard-Way
|
ex16.py
|
Python
|
mit
| 662
|
import os
import io
import re
import time
import json
import random
import datetime
import itertools
import functools
import sqlite3
import sphinxapi
from contextlib import closing
import flask
from flask import (Flask, request, redirect, session, url_for, render_template,
current_app, jsonify)
from logviewer.exc import AuthenticationError
from logviewer.parser import parse_log
from logviewer import routing, util
app = Flask(__name__)
app.url_map.converters['date'] = routing.DateConverter
@app.before_first_request
def init_jinja_env():
current_app.jinja_env.globals.update(
LOGBOT_PORT=current_app.config['LOGBOT_LISTEN'],
)
access_log = None
def filter_recent(messages, minutes):
n = len(messages)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=minutes)
# loop from n-1 to 0
count = 0
for i in xrange(n - 1, -1, -1):
if now - messages[i]['time'] <= delta:
count += 1
else:
break
limit = max(count, 50)
if limit == n:
return messages, False
else:
return messages[-limit:], True
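# Worked example (illustrative): with 200 messages of which only 10 fall inside the
# last 30 minutes, filter_recent(messages, 30) keeps max(10, 50) = 50 messages and
# returns (messages[-50:], True); when nothing would be cut it returns (messages, False).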
def group_messages(messages, thres):
it = iter(messages)
msg = next(it)
prev_time = msg['time']
group = [msg]
for msg in it:
if (msg['time'] - prev_time).seconds > thres:
yield group
group = []
group.append(msg)
prev_time = msg['time']
if group:
yield group
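# Worked example (illustrative): with thres=600 seconds, messages stamped 10:00,
# 10:05 and 10:30 yield two groups, [10:00, 10:05] and [10:30], because only the
# 10:05 -> 10:30 gap exceeds the threshold.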
def expand_synonyms(query):
d = {}
with io.open(app.config['SYNONYM_PATH'], encoding='utf-8') as fp:
for line in fp:
words = line.rstrip().split(' ')
words.sort(key=len, reverse=True)
for word in words:
d[word] = words
terms = query.split()
expanded_terms = []
for term in terms:
if term in d:
expanded_terms.append('(' + ')|('.join(d[term]) + ')')
else:
expanded_terms.append('(' + term + ')')
return ' '.join(expanded_terms)
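# Worked example (illustrative, assuming the synonym file contains the line "py python"):
# expand_synonyms(u"py web") -> u"(python)|(py) (web)", i.e. every known term is
# replaced by an OR-group of its synonyms, longest synonym first.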
def sphinx_search(query, offset, count):
client = sphinxapi.SphinxClient()
client.SetServer('localhost', 9312)
client.SetWeights([100, 1])
client.SetSortMode(sphinxapi.SPH_SORT_EXTENDED,
'date DESC, channel ASC, @id DESC')
client.SetLimits(offset, count, 100000)
client.SetRankingMode(sphinxapi.SPH_RANK_PROXIMITY_BM25)
client.SetMatchMode(sphinxapi.SPH_MATCH_BOOLEAN)
result = client.Query(query, '*')
if result and 'matches' in result:
messages = []
for msg in result['matches']:
attrs = msg['attrs']
channel = attrs['channel']
t = time.localtime(attrs['time'])
d = str(attrs['date'])
date = datetime.datetime.strptime(d, '%Y%m%d').date()
key = (channel, date)
messages.append((key, {
'type': 'privmsg',
'channel': channel,
'no': attrs['no'],
                'time': datetime.datetime(*t[:6]),  # year..second from the localtime struct
'nick': attrs['nick'].decode('utf-8'),
'text': attrs['content'].decode('utf-8'),
'is_bot': attrs['bot'],
}))
m = []
for k, v in itertools.groupby(messages, lambda x: x[0]):
channel, date = k
m.append((Log(channel, date), list(i[1] for i in v)))
return {
'total': result['total'],
'messages': m,
}
class Log(object):
def __init__(self, channel, date):
if not channel.startswith('#'):
raise ValueError()
self.name = channel[1:]
self.date = date
@property
def is_today(self):
return self.date == datetime.date.today()
@property
def path(self):
path = os.path.join(app.config['LOG_DIR'], self.name + '.log')
if not self.is_today:
return path + '.' + self.date.strftime('%Y%m%d')
else:
return path
@property
def exists(self):
return os.path.isfile(self.path)
def url(self, recent=None, **kwargs):
return url_for('log', channel=self.name, date=self.date, recent=recent, **kwargs)
def get_messages(self, start=None):
if not self.exists:
return
with io.open(self.path, encoding='utf-8', errors='replace') as fp:
for msg in parse_log(fp, start):
yield msg
@staticmethod
def today(channel):
return Log(channel, datetime.date.today())
@property
def yesterday(self):
return Log('#' + self.name, self.date - datetime.timedelta(days=1))
CANONICAL_PATTERN = re.compile(r'^[\^\|_]*([^\^\|_]*).*$')
def canonical(value):
value = value.lower()
m = CANONICAL_PATTERN.search(value)
if m is not None:
return m.group(1)
else:
return value
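# Worked example (illustrative): canonical(u'|_Alice^away') -> u'alice'; the value is
# lowercased, leading ^, | and _ characters are stripped, and everything from the next
# ^, | or _ onwards is dropped.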
def hashed(value, limit):
return hash(value) % limit
app.jinja_env.filters.update(canonical=canonical, hash=hashed)
def get_default_channel():
return util.irc_channels(current_app.config['IRC_CHANNELS'])[0]
def verify_channel(channel):
channels = util.irc_channels(current_app.config['IRC_CHANNELS'])
if channel is None:
return channels[0]['name']
channel = u'#' + channel
channel_names = (i['name'] for i in channels)
if channel not in channel_names:
flask.abort(404)
return channel
def login_required(f):
@functools.wraps(f)
def _wrapped(*args, **kwargs):
if 'username' not in session:
return redirect(url_for('login', next=request.url))
return f(*args, **kwargs)
return _wrapped
@app.route('/favicon.ico')
def favicon():
flask.abort(404) # it's very annoying.
@app.route('/login', methods=['GET', 'POST'])
def login():
auth = app.config['AUTH_BACKEND']
return auth.login(error=None)
@app.route('/login/authenticate', methods=['GET', 'POST'])
def authenticate():
auth = app.config['AUTH_BACKEND']
try:
result = auth.authenticate()
except AuthenticationError as e:
return auth.login(error=e)
global access_log
session['username'] = result['username']
now = datetime.datetime.now()
if not access_log:
access_log = io.open(app.config['ACCESS_LOG_PATH'], 'a',
encoding='utf-8')
access_log.write(u'[%s] %s logged in\n' %
(now.isoformat(), session['username']))
access_log.flush()
redirect_url = request.args.get('next')
if not redirect_url:
redirect_url = flask.session['_next_url']
if not redirect_url:
redirect_url = url_for('index')
return redirect(redirect_url)
@app.route('/logout', methods=['GET', 'POST'])
def logout():
del session['username']
return redirect(url_for('login'))
@app.route('/')
@login_required
def index():
channels = util.irc_channels(current_app.config['IRC_CHANNELS'])
if len(channels) == 1:
return redirect(url_for('channel', channel=channels[0]['name'][1:]))
logs = []
for i in channels:
log = Log.today(i['name'])
message = list(log.get_messages())[-5:]
logs.append(dict(
log=log,
message=message,
))
return render_template('index.html',
logs=logs)
@app.route('/<channel>', endpoint='channel')
def channel_(channel):
channel = verify_channel(channel)
today = Log.today(channel)
return redirect(today.url(recent=30))
@app.route('/random', defaults={'channel': None}, endpoint='random')
@app.route('/<channel>/random', endpoint='random')
def random_(channel):
if channel is None:
channels = util.irc_channels(current_app.config['IRC_CHANNELS'])
channel_names = [i['name'] for i in channels]
else:
channel = verify_channel(channel)
for _ in range(10):
chan = channel or random.choice(channel_names)
ago = random.randrange(30, 600)
rand = datetime.date.today() - datetime.timedelta(days=ago)
log = Log(chan, rand)
if log.exists:
break
else:
return redirect(url_for('index'))
return redirect(log.url())
@app.route('/<date:date>', defaults={'channel': None})
@app.route('/<channel>/<date:date>')
@login_required
def log(channel, date):
if channel is None:
channel = get_default_channel()['name'][1:]
return redirect(url_for('log', channel=channel, date=date))
channel = verify_channel(channel)
channels = util.irc_channels(current_app.config['IRC_CHANNELS'])
channel_names = [i['name'][1:] for i in channels
if i['name'].startswith('#')]
log = Log(channel, date)
if not log.exists and not log.is_today:
flask.abort(404)
if 'from' in request.args:
start = int(request.args['from'])
else:
start = None
messages = list(log.get_messages(start=start))
if messages:
last_no = max(msg['no'] for msg in messages)
else:
last_no = 0
if request.is_xhr:
if messages:
html = render_template('_messages.html',
log=log, messages=messages, last_no=last_no)
return jsonify(html=html, last_no=last_no)
else:
return jsonify(html=None)
options = {}
if log.is_today and 'recent' in request.args:
recent = int(request.args['recent'])
messages, truncated = filter_recent(messages, recent)
if truncated:
options['recent'] = recent
messages = group_messages(messages, app.config['GROUP_THRES'])
return render_template('log.html',
today=Log.today(channel),
log=log,
messages=messages,
options=options,
last_no=last_no,
username=session['username'],
channel=channel,
channels=channel_names)
@app.route('/atom', defaults={'channel': None})
@app.route('/<channel>/atom')
def atom(channel):
# TODO: auth
# TODO: omit last group
if channel is None:
return redirect(url_for('atom', channel=get_default_channel()['name'][1:]))
channel = verify_channel(channel)
log = Log.today(channel)
if not log.exists:
flask.abort(404)
messages = group_messages(log.get_messages(), app.config['GROUP_THRES'])
messages = reversed(list(messages))
return render_template('atom_feed.xml',
log=log,
messages=messages,
channel=channel,
)
@app.route('/search')
@login_required
def search():
query = request.args['q']
offset = int(request.args.get('offset', '0'))
per_page = app.config['SEARCH_RESULTS_PER_PAGE']
query_pattern = expand_synonyms(query)
result = sphinx_search(query_pattern, offset=offset, count=per_page)
page = offset / per_page
pages = [{'url': url_for('search', q=query, offset=n * per_page),
'number': n + 1,
'current': n == page}
for n in xrange(result['total'] / per_page)]
return render_template('search_result.html',
query=query,
total=result['total'],
result=result['messages'],
pages=pages,
query_pattern=query_pattern)
def connect_db():
conn = sqlite3.connect(app.config['FLAG_DB'])
conn.row_factory = sqlite3.Row
return conn
@app.route('/<channel>/<date:date>/flags')
@login_required
def flags(channel, date):
channel = verify_channel(channel)
with closing(connect_db()) as db:
c = db.cursor()
c.execute('select * from flags where channel=? and date=? '
'order by line',
(channel, date))
return json.dumps([dict(row) for row in c])
@app.route('/<channel>/<date:date>/<line>/flags', methods=['POST'])
@login_required
def flag(channel, date, line):
channel = verify_channel(channel)
db = connect_db()
c = db.cursor()
c.execute('insert into flags (channel, date, time, line, title, user) '
'values(?, ?, ?, ?, ?, ?)',
(channel, date, request.form['time'], int(line),
request.form['title'], session['username']))
db.commit()
id = c.lastrowid
db.close()
return str(id)
if __name__ == '__main__':
app.config.from_envvar('LOGVIEWER_SETTINGS')
app.run(host='0.0.0.0', port=5000)
|
langdev/log.langdev.org
|
logviewer/app.py
|
Python
|
mit
| 12,597
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
# from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
# with open(path.join(here, 'README.md'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='cloudconsole',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.dev1',
description='Yet another Cloud Dashboard',
# long_description=long_description,
# The project's main homepage.
url='https://github.com/cloudconsole/cloudconsole',
# Author details
author='Ashok Raja',
author_email='ashokraja.r@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 1 - Planning',
'Framework :: Flask',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: pypy3',
],
# What does your project relate to?
keywords='cloud dashboard console',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'Flask==1.0',
'Flask-DebugToolbar==0.10.0',
'pymongo==3.2.1',
'humanize==0.5.1',
'celery==3.1.19',
'WTForms==2.1',
'flask-script==2.0.5'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing
# -additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
|
cloudconsole/cloudconsole
|
setup.py
|
Python
|
mit
| 4,203
|
from django import template
from ..models import get_related_documents
register = template.Library()
@register.filter(name='get_related_documents')
def related_documents(model):
return get_related_documents(model)
|
yourlabs/django-documents
|
documents/templatetags/documents_tags.py
|
Python
|
mit
| 222
|
import requests
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from UserManagement.models import Attendent, ExpertProfile, Team
# Import for Google Maps Plugin
from django_google_maps import fields as map_fields
from django.utils.text import slugify
from datetime import timedelta, date, datetime
# --------------Location Management -------------------
# Define general helper Functions
START_DATE = datetime(2017, 8, 5)
END_DATE = datetime(2017, 8, 12)
def daterange(start_date, end_date):
"""
Returns a generator object to be used for the construction of
planning tables to manage Time Availabilities.
"""
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
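# Worked example (illustrative):
# list(daterange(datetime(2017, 8, 5), datetime(2017, 8, 8)))
# -> [datetime(2017, 8, 5, 0, 0), datetime(2017, 8, 6, 0, 0), datetime(2017, 8, 7, 0, 0)]
# (the end date itself is excluded).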
def geocode(lat, lng):
base = "http://maps.googleapis.com/maps/api/geocode/json?"
params = "latlng={lat},{lon}&sensor=false".format(
lat=lat,
lon=lng
)
url = "{base}{params}".format(base=base, params=params)
response = requests.get(url)
return response.json()['results'][0]['formatted_address']
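# Illustrative call (requires network access; coordinates are arbitrary):
# geocode(52.5200, 13.4050) returns the formatted street address that the Google Maps
# reverse-geocoding API reports for that latitude/longitude.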
class Venue(models.Model):
"""A specific location that houses rooms where Sessions take place."""
name = models.CharField(max_length=100)
slug = models.SlugField(blank=True, null=True)
img_venue = models.ImageField(upload_to="venues/locationImg/", blank=True)
# Google Maps Plugin - LocationInformation
map_img = models.FileField(upload_to='venues/locationMap/', blank=True)
address = map_fields.AddressField(max_length=200, null=True)
geolocation = map_fields.GeoLocationField(max_length=100, null=True)
directions = models.TextField(max_length=350, null=True)
location_info = models.TextField(max_length=1000, null=True)
# Management Relevant Information
BOOKING_STATUS = (
(1, 'inquired'),
(2, 'reserved'),
(3, 'confirmed')
)
booking_status = models.IntegerField(choices = BOOKING_STATUS)
def get_status(self):
return self.booking_status
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse_lazy('portal:venue_detail', args=[self.slug])
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
self.address = geocode(self.geolocation.lat, self.geolocation.lon)
super().save(*args, **kwargs)
class Room(models.Model):
"""A specific Room available to be assigned to a session."""
venue = models.ForeignKey(Venue)
name = models.CharField(max_length=60)
slots = {}
# Comment section for information as to equipment and specials
directions = models.TextField(max_length=2000, null=True)
level = models.CharField(max_length=50, null=True)
notes = models.TextField(blank=True, null=True)
# Display information
img_presentation = models.ImageField(upload_to="venues/presentationImg/", blank=True)
# Management related hidden fields
_internal_comments = models.TextField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def check_timeslots(self):
days = {key:None for key in daterange(START_DATE, END_DATE)}
availabilities = self.availability_set.all()
for day in days:
for slot in availabilities:
if slot.free_from.day == day.day:
days[day] = slot
return days
# ----- STATE MODELS ----------
class Availability(models.Model):
"""
    A continuous stretch of available time for a given Room
to be used with a Session.
"""
room = models.ForeignKey(Room)
slug = models.SlugField(blank=True)
free_from = models.DateTimeField()
free_till = models.DateTimeField()
# Calculate amount of time available in this stretch
def schedule(self, session):
"""
Checks if the Time associated with the Session planned fits
within the given availability. Returns Boolean answer.
"""
return True
def get_duration(self):
        duration = self.free_till - self.free_from
return duration
def check_available(self, Session):
if Session.startTime > self.free_from and Session.endTime <= self.free_till:
return True
else:
return False
def __str__(self):
return self.room.name
    # TODO: Turn the location into GPS for displaying on a map
def save(self, *args, **kwargs):
self.slug = slugify(self.room.name)
super().save(*args, **kwargs)
# ---------------Session Management-----------------------
class Session(models.Model):
"""A specific Round of Mediation between two Negotiating Teams and a Mediator
scheduled for a given Date and Time in a room at a certain Venue."""
name = models.CharField(max_length=100)
slug = models.SlugField(blank=True)
# The Teams taking part
mediatorTeam = models.ForeignKey(Team, related_name="mediatorteam")
negotiatorTeam = models.ForeignKey(Team, related_name="negotiatorteam")
# The Assessors taking part
assessors = models.ManyToManyField(ExpertProfile)
# The Room scheduled
venue = models.ForeignKey(Venue)
room = models.ForeignKey(Room)
# The Timeslot scheduled
startTime = models.DateTimeField(blank=True, null=True)
endTime = models.DateTimeField(blank=True, null=True)
# Additional Ressources Scheduled
notes = models.TextField(blank=True, null=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse_lazy('session:session_detail', args=[self.slug])
# ----------------Work Management-------------------------
class Shift(models.Model):
"""A flexible amount of time a given Person is available for Work at the Comp."""
staff = models.ForeignKey(Attendent)
start = models.DateTimeField()
end = models.DateTimeField()
def __str__(self):
return "{} to {}".format(self.start, self.end)
|
SkillSmart/ConferenceManagementSystem
|
SessionManagement/models.py
|
Python
|
mit
| 6,287
|
import unittest
import tempfile
import shutil
import os
from tempfile import TemporaryDirectory
from threading import Thread
from eubin import pop3
from mockserver import POP3Server
class TestStatelog(unittest.TestCase):
def setUp(self):
self.tmpdir = TemporaryDirectory()
self.logpath = os.path.join(self.tmpdir.name, 'statelog')
self.nonexist = os.path.join(self.tmpdir.name, 'nonexist')
with open(self.logpath, 'wb') as fw:
fw.write(b'001\n')
fw.write(b'002\n')
def tearDown(self):
self.tmpdir.cleanup()
def test_load(self):
state = pop3.statelog_load(self.logpath)
self.assertEqual(state, {b'001', b'002'})
def test_load_fallback(self):
state = pop3.statelog_load(self.nonexist)
self.assertEqual(state, set())
def test_create(self):
pop3.statelog_save(self.logpath, {b'001', b'002'})
with open(self.logpath, 'rb') as fp:
self.assertEqual(fp.readline(), b'001\n')
self.assertEqual(fp.readline(), b'002\n')
class TestPOP3(unittest.TestCase):
def setUp(self):
# Mocking POP3 server
self.server = POP3Server()
self.server.set_respdict({
'greeting': b'+OK Greetings, Human! <message-id>\r\n',
'user': b'+OK Valid user\r\n',
            'pass': b'+OK Password ok\r\n',
'apop': b'+OK Authentication successful\r\n',
'stat': b'+OK 2 320\r\n',
'dele': b'+OK Mark the mail as deleted.\r\n',
'retr': b'+OK\r\n<mail-text>\r\n.\r\n',
'quit': b'+OK Good bye!',
'top': b'+OK\r\n<header>\r\n.\r\n',
'uidl': b'+OK\r\n1 001\r\n2 002\r\n.\r\n',
})
self.host, self.port = self.server.get_conninfo()
Thread(target=self.server.run).start()
# Set up a mailbox
self.mailbox = tempfile.mkdtemp()
for dirname in ('new', 'cur', 'tmp'):
os.mkdir(os.path.join(self.mailbox, dirname))
# Set up a hashlog
self.hashlog = tempfile.NamedTemporaryFile()
def tearDown(self):
shutil.rmtree(self.mailbox)
self.hashlog.close()
def test_login(self):
client = pop3.Client(self.host, self.port)
client.login('user', 'password')
client.quit()
recvlog = self.server.get_logiter()
self.assertEqual(next(recvlog), b'USER user\r\n')
self.assertEqual(next(recvlog), b'PASS password\r\n')
self.assertEqual(next(recvlog), b'QUIT\r\n')
def test_login_apop(self):
client = pop3.Client(self.host, self.port)
client.login('user', 'password', apop=True)
client.quit()
recvlog = self.server.get_logiter()
self.assertEqual(next(recvlog), b'APOP user 88670a99aa1930515aae5569677fac19\r\n')
self.assertEqual(next(recvlog), b'QUIT\r\n')
def test_fetch(self):
client = pop3.Client(self.host, self.port)
client.login('user', 'password')
client.fetch(self.mailbox)
client.quit()
recvlog = self.server.get_logiter()
self.assertEqual(next(recvlog), b'USER user\r\n')
self.assertEqual(next(recvlog), b'PASS password\r\n')
self.assertEqual(next(recvlog), b'STAT\r\n')
self.assertEqual(next(recvlog), b'RETR 1\r\n')
self.assertEqual(next(recvlog), b'DELE 1\r\n')
self.assertEqual(next(recvlog), b'RETR 2\r\n')
self.assertEqual(next(recvlog), b'DELE 2\r\n')
self.assertEqual(next(recvlog), b'QUIT\r\n')
def test_fetch_copy(self):
client = pop3.Client(self.host, self.port)
client.login('user', 'password')
client.fetch_copy(self.mailbox, logpath=self.hashlog.name)
client.fetch_copy(self.mailbox, logpath=self.hashlog.name) # Retry
client.quit()
recvlog = self.server.get_logiter()
self.assertEqual(next(recvlog), b'USER user\r\n')
self.assertEqual(next(recvlog), b'PASS password\r\n')
self.assertEqual(next(recvlog), b'STAT\r\n')
self.assertEqual(next(recvlog), b'UIDL\r\n')
self.assertEqual(next(recvlog), b'RETR 1\r\n')
self.assertEqual(next(recvlog), b'RETR 2\r\n')
self.assertEqual(next(recvlog), b'STAT\r\n')
self.assertEqual(next(recvlog), b'UIDL\r\n')
self.assertEqual(next(recvlog), b'QUIT\r\n')
def test_fetch_contents(self):
client = pop3.Client(self.host, self.port)
client.login('user', 'password')
client.fetch(self.mailbox)
client.quit()
# Enter into 'new' directory of the mailbox.
os.chdir(os.path.join(self.mailbox, 'new'))
# There must be two mails exactly.
mails = os.listdir('./')
self.assertEqual(len(mails), 2)
# Check the contents of these mails.
for mail in mails:
with open(mail, 'rb') as fp:
self.assertEqual(fp.read(), b'<mail-text>\n')
if __name__ == '__main__':
unittest.main()
|
fujimotos/Eubin
|
test/test_pop3.py
|
Python
|
mit
| 5,035
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Django settings for mysite project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
    'hello.context_processors.categories',  # custom context processor function
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '!sb(g7oczsg@xq$ovv^*%xok+ueczmo@1wg=iceb!tnh671oy@'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'hello',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
mozillazg/django-simple-projects
|
projects/custom-context-processors/mysite/settings.py
|
Python
|
mit
| 5,731
|
from hcsr04sensor import sensor
# Created by Al Audet
# MIT License
def main():
"""Calculate the depth of a liquid in centimeters using a HCSR04 sensor
and a Raspberry Pi"""
trig_pin = 17
echo_pin = 27
# Default values
# unit = 'metric'
# temperature = 20
hole_depth = 80 # centimeters
# Create a distance reading with the hcsr04 sensor module
value = sensor.Measurement(trig_pin, echo_pin)
raw_measurement = value.raw_distance()
    # To override default values you can pass the following to value
# value = sensor.Measurement(trig_pin,
# echo_pin,
# temperature=10,
# )
# Calculate the liquid depth, in centimeters, of a hole filled
# with liquid
liquid_depth = value.depth(raw_measurement, hole_depth)
print("Depth = {} centimeters".format(round(liquid_depth, 1)))
if __name__ == "__main__":
main()
|
alaudet/hcsr04sensor
|
recipes/metric_depth.py
|
Python
|
mit
| 979
|
from pabiana import Area, repo
from pabiana.utils import multiple
from . import utils
area = Area(repo['area-name'], repo['interfaces'])
premise = utils.setup
config = {
'clock-name': 'clock',
'clock-slots': multiple(1, 6),
'context-values': {
'temperature': 18,
'window-open': False
}
}
@area.register
def increase_temp():
area.context['temperature'] += 0.1
area.autoloop('increase_temp')
@area.register
def lower_temp():
area.context['temperature'] -= 0.1
area.autoloop('lower_temp')
@area.register
def keep_temp():
pass
@area.register
def window(open):
area.context['window-open'] = open
@area.scheduling
def schedule(demand: dict, alterations: set, looped: set=None, altered: set=None):
"""Prioritizes Triggers called from outside."""
if looped is not None and len(demand) != len(looped):
for func in looped: del demand[func]
if keep_temp in demand:
demand.pop(increase_temp, None)
demand.pop(lower_temp, None)
elif lower_temp in demand:
demand.pop(increase_temp, None)
return demand, alterations
@area.pulse
def routine():
if area.time % 8 == 0:
area.publish({
'temperature': area.context['temperature'],
'window-open': area.context['window-open']
})
|
kankiri/pabiana
|
demos/smarthome/smarthome/__init__.py
|
Python
|
mit
| 1,208
|
# -*- coding: utf-8 -*-
"""This module contains base class for dictionary entry"""
class Entry():
"""Store info of a word that is one part of speech.
    Store pronunciation, sound, part of speech, word explanation and
example sentences about one word or expression.
"""
def __init__(self):
"""Initialize a entry instance.
pronounciation:
[(tag, IPA), ...]
sound:
[herf, ..]
separate_storage:
            Whether the explanation and the example sentences
are stored side by side or separately.
If true:[[explanation, ...],[example sentences, ...]]
If false:[[explanation,example sentences], ...]
"""
# FIXME: pronouciation datastructure as list
self.pronounciation = []
self.sound = []
self.pos = ""
self.separate_storage = True
self.explanation = []
class SuperEntry():
"""Store explanations of a word that may be different part of speech.
Store word text, source and entries of a word.
"""
def __init__(self, source, source_name, word_text):
"""Initialize a SuperEntry instance.
valid: Whether there is valid response from the dictionary
source: Source of the SuperEntry
source_name:Name of the source of the SuperEntry
wordText: Text of the word or expression
entries: List of entries
"""
self.valid = True
self.source = source
self.source_name = source_name
self.word_text = word_text
self.entries = []
def show_no_style(self):
"""Generate displayable formated text with out style sheet"""
pass
def show_with_style(self):
"""Generate displayable formated text with style sheet"""
pass
def dbformat(self):
"""Generate structured data to be stored in database"""
pass
|
RihanWu/vocabtool
|
dict/base_class.py
|
Python
|
mit
| 1,940
|
"""
Wave class
Jacob Dein 2016
nacoustik
Author: Jacob Dein
License: MIT
"""
from sys import stderr
from os import path
from sox import file_info
import numpy as np
from scipy.io import wavfile
class Wave:
"""Create wave object"""
def __init__(self, wave):
"""
Parameters
----------
wave: file path to WAV file or numpy array of a WAV signal samples
array must be in the shape (n_samples, n_channels)
"""
if type(wave) is str:
self.filepath = wave
self.basename = path.basename(wave)
# properties
self.bit_depth = file_info.bitrate(wave) # bit depth
self.n_samples = file_info.num_samples(wave) # number of samples
self.n_channels = file_info.channels(wave) # number of channels
self.duration = file_info.duration(wave) # duration
else:
self.samples = wave
self.n_samples = len(wave) # number of samples
self.n_channels = wave.shape[1] # number of channels
self.channels = np.arange(self.n_channels) # channels
self.normalized = False # normalized
# def __str__():
def normalize(self, value = None):
"""
Normalize wave file
Parameters
----------
value: float, default = None
normalize the wave signal
If 'None', the wave will be normalized
based on the potential maximum value
that is determined by the bit depth of each sample
"""
try:
self.samples = self.samples / (2.**(self.bit_depth - 1))
self.normalized = True
except AttributeError as error:
print(error, file = stderr)
def read(self):
"""
Read wave file
"""
try:
self.rate, self.samples = wavfile.read(self.filepath)
except AttributeError as error:
print(error, file = stderr)
|
jacobdein/nacoustik
|
nacoustik/wave.py
|
Python
|
mit
| 1,700
|
"""Read any number of files and write a single merged and ordered file"""
import time
import fastparquet
import numpy as np
import pandas as pd
def generate_input_files(input_filenames, n, max_interval=0):
for i_fn in input_filenames:
df = _input_data_frame(n,
max_interval=max_interval,
relative_time_period=1000)
df.to_csv(i_fn)
def _input_data_frame(n, max_interval, relative_time_period):
"""
:param n: Number of timestamp entries
:param max_interval: Maximum time difference between non-monotonically
increasing entries
:param relative_time_period: Maximum time period between first and last
timestamp entries
:return:
"""
ts = 'timestamp'
# Timestamp (int) index
now = int(time.time())
low = now - relative_time_period
high = now
rel_time = np.random.randint(low=low, high=high, size=n)
rel_time.sort()
# Generate jitter in output: swap some times if < max_interval
# Do not swap consecutive pairs
one_diff = np.diff(rel_time, n=1)
one_diff = np.insert(one_diff, [0], max_interval)
two_diff = np.diff(rel_time, n=2)
two_diff = np.concatenate(([max_interval, max_interval], two_diff))
# Time difference less than max_interval
diff_lt_lbl = (one_diff < max_interval) & (two_diff < max_interval)
# Do not swap consecutive pairs
swap_lbl = np.random.rand(n) >= 0.5
lst_nonswap_lbl = ~np.roll(swap_lbl, shift=1)
nonconsec_swap_lbl = swap_lbl & lst_nonswap_lbl
# Randomly choose swaps among time difference less than max_interval
swap_diff_lt_lbl = nonconsec_swap_lbl & diff_lt_lbl
# Swap
for i, swap in enumerate(swap_diff_lt_lbl):
if swap:
rel_time[i-1], rel_time[i] = rel_time[i], rel_time[i-1]
index = pd.Index(data=rel_time, name=ts)
# Random data
data = {
'a': list(range(n))
}
return pd.DataFrame(data=data, index=index)
if __name__ == '__main__':
input_filenames = [
'input/a.csv',
'input/b.csv',
'input/c.csv',
]
# generate_input_files(input_filenames, n=15, max_interval=15)
# output_filename = 'output/bar.csv'
# with open(output_filename, 'w') as output_fp:
# merge_files(input_filenames, output_fp.write)
df = _input_data_frame(100, max_interval=10, relative_time_period=1000)
|
rheineke/log_merge
|
main.py
|
Python
|
mit
| 2,419
|
import unittest
from yahtr.utils.attr import get_from_dict, copy_from_instance
from yahtr.utils.color import Color
class Dummy:
pass
class TestAttr(unittest.TestCase):
def setUp(self):
self.dummy = Dummy()
self.args = ['p1', 'p2', 'p3']
self.d = {'p1': 'p1', 'p2': None, 'p4': {}}
def test_get_from_dict(self):
get_from_dict(self.dummy, self.d, *self.args)
self.assertEqual(self.dummy.p1, 'p1')
self.assertEqual(self.dummy.p2, None)
self.assertEqual(self.dummy.p3, None)
def test_copy_from_instance(self):
dummy = Dummy()
get_from_dict(dummy, self.d, *self.args)
copy_from_instance(dummy, self.dummy, *self.args)
self.assertEqual(self.dummy.p1, 'p1')
self.assertEqual(self.dummy.p2, None)
self.assertEqual(self.dummy.p3, None)
class TestColor(unittest.TestCase):
def test_core(self):
self.assertEqual(Color.white.r, 1.)
self.assertEqual(Color.white.g, 1.)
self.assertEqual(Color.white.b, 1.)
self.assertEqual(Color.white.a, 1.)
self.assertEqual(Color.white.rgb, [1., 1., 1.])
self.assertEqual(Color.white.rgb_dict, {'r': 1., 'g': 1., 'b': 1.})
def test_access(self):
red1 = Color('red')
red2 = Color.red
red3 = Color.red
self.assertNotEqual(red1, red2)
self.assertNotEqual(red2, red3)
self.assertNotEqual(red1, red3)
self.assertEqual(red1.r, red2.r)
if __name__ == "__main__":
unittest.main()
|
fp12/yahtr
|
tests/tests_utils.py
|
Python
|
mit
| 1,541
|
# -*- coding: utf-8 -*-
"""Exception classes."""
class JSONAPIError(Exception):
"""Base class for all exceptions in this package."""
pass
class IncorrectTypeError(JSONAPIError, ValueError):
"""Raised when client provides an invalid `type` in a request."""
pointer = '/data/type'
default_message = 'Invalid type. Expected "{expected}".'
def __init__(self, message=None, actual=None, expected=None):
message = message or self.default_message
format_kwargs = {}
if actual:
format_kwargs['actual'] = actual
if expected:
format_kwargs['expected'] = expected
self.detail = message.format(**format_kwargs)
super(IncorrectTypeError, self).__init__(self.detail)
@property
def messages(self):
"""JSON API-formatted error representation."""
return {
'errors': [
{'detail': self.detail, 'pointer': self.pointer}
]
}
|
Tim-Erwin/marshmallow-jsonapi
|
marshmallow_jsonapi/exceptions.py
|
Python
|
mit
| 976
|
"""
Second-Hand-Shop Project
@author: Malte Gerth
@copyright: Copyright (C) 2015 Malte Gerth
@license: MIT
@maintainer: Malte Gerth
@email: mail@malte-gerth.de
"""
from django import template
from events.models import get_active_event
__author__ = "Malte Gerth <mail@malte-gerth.de>"
__copyright__ = "Copyright (C) 2015 Malte Gerth"
__license__ = "MIT"
register = template.Library()
@register.assignment_tag
def is_volunteer(user):
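    """Return True if the user volunteers for the active event and has at least one shift."""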
volunteer = user.volunteer.filter(event=get_active_event()).first()
if volunteer is None:
return False
return bool(volunteer.shifts.count())
|
JanMalte/secondhandshop_server
|
src/volunteers/templatetags/volunteer.py
|
Python
|
mit
| 613
|
#! /usr/bin/env python
from hashlib import md5
from Crypto.Cipher import AES
from Crypto import Random
import struct
class PaddingError(Exception):
pass
def derive_key_and_iv(password, salt, key_length, iv_length):
d = d_i = ''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length+iv_length]
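# Note: this derivation mirrors OpenSSL's EVP_BytesToKey with MD5 and a single
# round (as used by `openssl enc`): D_1 = MD5(password + salt),
# D_i = MD5(D_{i-1} + password + salt), concatenated until key_length + iv_length
# bytes are available.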
def aes_encrypt(plaintext, password, key_length=32):
if len(plaintext) == 0:
return ''
bs = AES.block_size
# generate random salt
salt = Random.new().read(bs)
# derive key, iv
key, iv = derive_key_and_iv(password, salt, key_length, bs)
#iv = chr(0)*16
cipher = AES.new(key, AES.MODE_CBC, iv)
chunk = plaintext
padding_length = (bs - len(chunk) % bs) or bs
chunk += padding_length * chr(padding_length)
return cipher.encrypt(chunk), salt, iv
def aes_decrypt(ciphertext, password, salt, key_length=32):
if len(ciphertext) == 0:
return ''
bs = AES.block_size
# derive key, iv
key, iv = derive_key_and_iv(password, salt, key_length, bs)
#iv = chr(0)*16
cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext)
#print "plain:", [ord(c) for c in plaintext]
check_paddings(plaintext)
padding_length = ord(plaintext[-1])
return plaintext[:-padding_length]
def check_paddings(s):
padding = ord(s[-1])
if padding > 16 or padding <= 0:
# padding error
raise PaddingError()
i = 2
while i <= padding:
if ord(s[-i]) != padding:
# padding error
raise PaddingError()
i += 1
def poa(ciphertext, padding_oracle, iv=None):
bs = AES.block_size
if len(ciphertext) < bs or len(ciphertext) % bs != 0:
print "Invalid ciphertext"
return
if len(ciphertext) == bs and iv is None:
print "Cannot crack the first block without IV!"
return
block_num = len(ciphertext) / bs
# Crack the last block first
target_block = ciphertext[(block_num-1)*bs:]
if block_num-1 == 0:
pre_block = iv
else:
        pre_block = ciphertext[(block_num-2)*bs:(block_num-1)*bs]
plaintext = crack_block(target_block, pre_block)
check_paddings(plaintext)
padding = ord(plaintext[-1])
plaintext = plaintext[:-padding]
for i in xrange(block_num - 2, -1, -1):
target_block = ciphertext[i*bs:(i+1)*bs]
if i == 0:
pre_block = iv
else:
pre_block = ciphertext[(i-1)*bs:i*bs]
p = crack_block(target_block, pre_block)
plaintext = p + plaintext
return plaintext
def crack_block(target_block, pre_block):
bs = AES.block_size
plain_block = [0] * bs
inter_block = [0] * bs
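    # CBC gives P2[k] = I2[k] XOR C1[k], where I2 = block-decrypt(target_block) and
    # C1 is the preceding ciphertext block. Forging the preceding block and asking
    # the padding oracle recovers I2 byte by byte; XORing I2 with the real
    # preceding block (pre_block) then yields the plaintext bytes below.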
# Crack the last byte
for i in xrange(0, 0xff+1):
mock_block = struct.pack('16B', *([0]*15 + [i]))
if padding_oracle(mock_block+target_block):
# check padding value
padding_value = 1
for next_padding_value in xrange(2, bs+1):
mock_block = struct.pack('16B', *([0]*(bs-next_padding_value) + [1] + [0]*(next_padding_value-2) + [i]))
#print 'mock:', [ord(c) for c in mock_block]
if padding_oracle(mock_block+target_block):
break
padding_value = next_padding_value
print 'xxx:', padding_value
I2 = i ^ padding_value
inter_block[15] = I2
# P2: the last byte of the plaintext of the target_block
P2 = ord(pre_block[15]) ^ I2
plain_block[15] = P2
break
# Crack remain bytes
for current_pos in xrange(14, -1, -1):
padding_value = bs - current_pos
mock_tail = [padding_value ^ I2 for I2 in inter_block[current_pos+1:]]
for i in xrange(0, 0xff+1):
mock_block = struct.pack('16B', *([0]*(current_pos) + [i] + mock_tail))
if padding_oracle(mock_block+target_block):
I2 = i ^ padding_value
inter_block[current_pos] = I2
P2 = ord(pre_block[current_pos]) ^ I2
plain_block[current_pos] = P2
return ''.join([chr(v) for v in plain_block])
if __name__ == '__main__':
import argparse, os
parser = argparse.ArgumentParser(description='Padding Oracle Attack Demo.')
parser.add_argument('--target', default=('a'*16+'b'*16+'c'*16+'d'*4))
parser.add_argument('-p', '--password', dest='password', default='PaddingOracleAttack')
parser.add_argument('--key-len', type=int, default=32)
parser.add_argument('-d', '--decrypt', dest='decrypt_flag', action='store_const', const=True, help='decrypt')
args = parser.parse_args()
print args
target = args.target
password = args.password
key_length = args.key_len
decrypt_flag = args.decrypt_flag
if decrypt_flag:
ret = aes_decrypt(target[16:], password, target[:16], key_length)
if not ret:
print 'Decrpt failed!'
else:
print ret
else:
ciphertext, salt, iv = aes_encrypt(target, password, key_length)
print aes_decrypt(ciphertext, password, salt, key_length)
def padding_oracle(ciphertext):
try:
aes_decrypt(ciphertext, password, salt, key_length)
return True
except PaddingError:
return False
print 'Cracked:', poa(ciphertext, padding_oracle, iv)
|
mindeng/crypto-utils
|
padding-oracle-attack/poa_test.py
|
Python
|
mit
| 5,516
|
import math
print("Usage:")
print("City names can be as specific as needed, for example: San Fransico, CA, or Chicago, IL, USA")
cityFirst = input("Enter the name of the first city: ")
citySecond = input("Enter the name of the second city: ")
|
tanishq-dubey/personalprojects
|
Distance Between Cities/distCities.py
|
Python
|
mit
| 252
|
import re
from markdown.preprocessors import Preprocessor
from markdown import Extension
__version__ = "0.1.1"
class JournalPreprocessor(Preprocessor):
pattern_time = re.compile(r'^(\d{4})$')
pattern_date = re.compile(r'^(\d{4}-\d{2}-\d{2})$')
def run(self, lines):
return [self._t1(self._t2(line)) for line in lines]
def _t1(self, line):
return self.pattern_time.sub(self._replacer_time, line)
def _t2(self, line):
return self.pattern_date.sub(self._replacer_date, line)
def _replacer_time(self, match):
time_str = match.groups()
return '<h4>%s</h4>' % (time_str)
def _replacer_date(self, match):
time_str = match.groups()
return '<h3>%s</h3>' % (time_str)
class JournalExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add('journal', JournalPreprocessor(md), '<reference')
def makeExtension(configs=None):
return JournalExtension(configs=configs)
|
iandennismiller/mdx_journal
|
mdx_journal/__init__.py
|
Python
|
mit
| 992
|
import numpy as np
import sympy
from ..helpers import book, untangle, z
from ._albrecht import albrecht_4 as stroud_s2_9_1
from ._albrecht import albrecht_5 as stroud_s2_11_2
from ._albrecht import albrecht_6 as stroud_s2_13_2
from ._albrecht import albrecht_7 as stroud_s2_15_2
from ._albrecht import albrecht_8 as stroud_s2_17_1
from ._albrecht_collatz import albrecht_collatz as stroud_s2_3_2
from ._hammer_stroud import hammer_stroud_11_2 as stroud_s2_3_1
from ._hammer_stroud import hammer_stroud_12_2 as stroud_s2_5_2
from ._hammer_stroud import hammer_stroud_18 as stroud_s2_7_2
from ._helpers import S2Scheme, register
from ._mysovskih import mysovskih_1 as stroud_s2_4_1
from ._mysovskih import mysovskih_2 as stroud_s2_11_1
from ._mysovskih import mysovskih_3 as stroud_s2_15_1
from ._peirce_1956 import peirce_1956_1 as stroud_s2_7_1
from ._peirce_1956 import peirce_1956_2 as stroud_s2_9_5
from ._peirce_1956 import peirce_1956_3 as stroud_s2_11_4
from ._rabinowitz_richter import rabinowitz_richter_1 as stroud_s2_9_2
from ._rabinowitz_richter import rabinowitz_richter_2 as stroud_s2_9_4
from ._rabinowitz_richter import rabinowitz_richter_4 as stroud_s2_11_3
from ._rabinowitz_richter import rabinowitz_richter_5 as stroud_s2_13_1
from ._radon import radon
_source = book(
authors=["Arthur Stroud"],
title="Approximate Calculation of Multiple Integrals",
publisher="Prentice Hall",
year="1971",
)
def stroud_s2_5_1():
return radon(0)
def stroud_s2_9_3():
# spherical product gauss 9
sqrt = np.vectorize(sympy.sqrt)
pm_ = np.array([+1, -1])
cos = np.vectorize(sympy.cos)
sin = np.vectorize(sympy.sin)
frac = sympy.Rational
pi = sympy.pi
r1, r2 = sqrt((6 - pm_ * sqrt(6)) / 10)
a = 2 * (np.arange(10) + 1) * pi / 10
x = np.array([cos(a), sin(a)]).T
B0 = frac(1, 9)
B1, B2 = (16 + pm_ * sqrt(6)) / 360
data = [(B0, z(2)), (B1, r1 * x), (B2, r2 * x)]
points, weights = untangle(data)
points = np.ascontiguousarray(points.T)
d = {"plain": [weights, points[0], points[1]]}
return S2Scheme("Stroud S2 9-3", d, 9, _source)
register(
[
stroud_s2_3_1,
stroud_s2_3_2,
stroud_s2_4_1,
stroud_s2_5_1,
stroud_s2_5_2,
stroud_s2_7_1,
stroud_s2_7_2,
stroud_s2_9_1,
stroud_s2_9_2,
stroud_s2_9_3,
stroud_s2_9_4,
stroud_s2_9_5,
stroud_s2_11_1,
stroud_s2_11_2,
stroud_s2_11_3,
stroud_s2_11_4,
stroud_s2_13_1,
stroud_s2_13_2,
stroud_s2_15_1,
stroud_s2_15_2,
stroud_s2_17_1,
]
)
|
nschloe/quadpy
|
src/quadpy/s2/_stroud.py
|
Python
|
mit
| 2,646
|
# encoding: utf-8
"""
Test suite for the docx.oxml.parts module.
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from .unitdata.document import a_body
from ..unitdata.section import a_type
from ..unitdata.text import a_p, a_pPr, a_sectPr
class DescribeCT_Body(object):
def it_can_clear_all_the_content_it_holds(self):
"""
Remove all content child elements from this <w:body> element.
"""
cases = (
(a_body().with_nsdecls(),
a_body().with_nsdecls()),
(a_body().with_nsdecls().with_child(a_p()),
a_body().with_nsdecls()),
(a_body().with_nsdecls().with_child(a_sectPr()),
a_body().with_nsdecls().with_child(a_sectPr())),
(a_body().with_nsdecls().with_child(a_p()).with_child(a_sectPr()),
a_body().with_nsdecls().with_child(a_sectPr())),
)
for before_body_bldr, after_body_bldr in cases:
body = before_body_bldr.element
# exercise -----------------
body.clear_content()
# verify -------------------
assert body.xml == after_body_bldr.xml()
def it_can_add_a_section_break(self, section_break_fixture):
body, expected_xml = section_break_fixture
sectPr = body.add_section_break()
assert body.xml == expected_xml
assert sectPr is body.get_or_add_sectPr()
# fixtures -------------------------------------------------------
@pytest.fixture
def section_break_fixture(self):
body = (
a_body().with_nsdecls().with_child(
a_sectPr().with_child(
a_type().with_val('foobar')))
).element
expected_xml = (
a_body().with_nsdecls().with_child(
a_p().with_child(
a_pPr().with_child(
a_sectPr().with_child(
a_type().with_val('foobar'))))).with_child(
a_sectPr().with_child(
a_type().with_val('foobar')))
).xml()
return body, expected_xml
|
guilhermebr/python-docx
|
tests/oxml/parts/test_document.py
|
Python
|
mit
| 2,154
|
class Flower:
"""A flower."""
def __init__(self, name, petals, price):
"""Create a new flower instance.
name the name of the flower (e.g. 'Spanish Oyster')
        petals the number of petals (e.g. 50)
price price of each flower (measured in euros)
"""
self._name = str(name)
self.set_petals(petals)
self.set_price(price)
def set_name(self, name):
self._name = str(name)
def get_name(self):
return self._name
def set_petals(self, petals):
try:
self._petals = int(petals)
except (TypeError, ValueError):
print('set_petals(): could not parse "%s" to int().' % petals)
def get_petals(self):
return self._petals
def set_price(self, price):
try:
self._price = float(price)
except (TypeError, ValueError):
print('set_price(): You should parse "%s" to float().' % price)
def get_price(self):
return self._price
if __name__ == '__main__':
rose = Flower('Rose', 60, 1.3)
print('%s contains %d petals costs %.2f euros.' % \
(rose.get_name(), rose.get_petals(), rose.get_price()))
"""Example with error."""
rose.set_petals('error')
|
GeorgeGkas/Data_Structures_and_Algorithms_in_Python
|
Chapter2/R-2/4.py
|
Python
|
mit
| 1,278
|
import numpy as np
def sigmoid(x):
"""Calculate sigmoid"""
return 1 / (1 + np.exp(-x))
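# Note: sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)), which is why the gradient
# terms below use output * (1 - output) and hidden_layer_output * (1 - hidden_layer_output).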
x = np.array([0.5, 0.1, -0.2])
target = 0.6
learning_rate = 0.5
weights_input_hidden = np.array([[0.5, -0.6],
[0.1, -0.2],
[0.1, 0.7]])
weights_hidden_output = np.array([0.1, -0.3])
# forward pass
hidden_layer_input = np.dot(x, weights_input_hidden)
hidden_layer_output = sigmoid(hidden_layer_input)
output_layer_in = np.dot(hidden_layer_output, weights_hidden_output)
output = sigmoid(output_layer_in)
# backwards pass
# calculate error
error = target - output
# calculate error gradient for output layer
del_err_output = error * output * (1 - output)
# calculate error gradient for hidden layer
del_err_hidden = np.dot(del_err_output, weights_hidden_output) * hidden_layer_output * (
1 - hidden_layer_output)
# calculate change in weights for hidden layer to output layer
delta_w_h_o = learning_rate * del_err_output * hidden_layer_output
# calculate change in weights for input layer to hidden layer
delta_w_i_o = learning_rate * del_err_hidden * x[:, None]
print('Change in weights for hidden layer to output layer:')
print(delta_w_h_o)
print('Change in weights for input layer to hidden layer:')
print(delta_w_i_o)
|
Kulbear/deep-learning-nano-foundation
|
lectures/backpropagation.py
|
Python
|
mit
| 1,296
|
# -*- coding: utf-8 -*-
#
# Mock module for the Unicorn Hat package
#
# Copyright (c) 2015 carlosperate http://carlosperate.github.io
#
# Licensed under The MIT License (MIT), a copy can be found in the LICENSE file
#
# This is a simple mock module that will print to screen the unicorn hat package
# calls. It is used to be able to do development in a non-raspberry pi system,
# as the Unicorn Hat package is the python code to control an LED matrix:
# http://shop.pimoroni.com/products/unicorn-hat
#
# Only the functions called within the HardwareLamp module will be defined here,
# to be expanded as development might require it.
#
from __future__ import unicode_literals, absolute_import
verbose = True
brightness_level = 0
verbose_counter = 0
def brightness(b=0.2):
global brightness_level
brightness_level = b
if verbose is True:
print('Unicorn brightness set to: %s' % b)
def set_pixel(x, y, r, g, b):
if verbose is True:
print('Unicorn pixel set x: %s; y: %s; rgb: %s %s %s' % (x, y, r, g, b))
def show():
global verbose_counter
verbose_counter += 1
if verbose is False and (verbose_counter % 10 == 0):
print('Unicorn updated, brightness %.3f' % brightness_level)
|
carlosperate/LightUpPi-Alarm
|
LightUpHardware/unicornhatmock.py
|
Python
|
mit
| 1,231
|
import sys
import os
if len(sys.argv) < 2:
print "Please include a template to knit."
sys.exit(1)
if sys.argv[1] == 'list':
print "Available templates:\n"
print "\n".join(os.listdir('./templates'))
sys.exit(0)
try:
with open("./templates/%s" % sys.argv[1], 'r') as f:
template = f.read()
except IOError:
print "That template doesn't exist."
sys.exit(1)
lines = template.split('\n')
for x in range(5):
for line in lines:
print line * 5
|
staab/maria-code
|
knitter/main.py
|
Python
|
mit
| 493
|
from openpyxl import workbook, load_workbook
xl = load_workbook(filename='./ipaddr.xlsx', data_only=True)
xs = xl.active
col_rang = xl['Sheet1']
cell_rang = xs['A1':'A2']
#print(col_rang['A'].value)
print ('Getting Data From :', xl.get_sheet_names())
for row in xs.iter_rows():
for cell in row:
ipaddr = cell.value
#print(ipaddr)
        if ipaddr:  # skip empty cells
            with open(str(ipaddr) + ".txt", "a") as out:
                print(file=out)
|
phasedscum/python-as-a-waffle
|
Scratch Dir/Excel Testing/exceltest.py
|
Python
|
mit
| 397
|
# Write a program to check whether a given number is an ugly number.
# Ugly numbers are positive numbers whose prime factors only include 2, 3, 5.
# For example, 6, 8 are ugly while 14 is not ugly since it includes another prime factor 7.
# Note that 1 is typically treated as an ugly number.
class Solution(object):
def isUgly(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 0:
return False
while (num % 5) == 0:
num = int(num/5)
while (num % 3) == 0:
num = int(num/3)
while (num % 2) == 0:
num = int(num/2)
return num == 1
|
JiangKlijna/leetcode-learning
|
py/263 Ugly Number/UglyNumber.py
|
Python
|
mit
| 660
|
from .. import Provider as BaseProvider
class Provider(BaseProvider):
"""
source: https://ja.wikipedia.org/wiki/%E8%81%B7%E6%A5%AD%E4%B8%80%E8%A6%A7
"""
jobs = [
"アイドル",
"アーティスト",
"アートディレクター",
"アナウンサー",
"アニメーター",
"医師",
"イラストレーター",
"医療事務員",
"ウェディングプランナー",
"ウェブデザイナー",
"占い師",
"運転士",
"映画監督",
"営業",
"栄養士",
"エステティシャン",
"絵本作家",
"演歌歌手",
"エンジニア" "演奏家",
"お笑い芸人",
"音楽家",
"音響技術者",
"介護ヘルパー",
"気象予報士",
"脚本家",
"救急救命士",
"行政書士",
"グラフィックデザイナー",
"経営者",
"検察官",
"ゲームクリエイター",
"建築家",
"航海士",
"コピーライター",
"高等学校教員",
"公認会計士",
"公務員",
"裁判官",
"作曲家",
"歯科医師",
"司法書士",
"小説家",
"寿司職人",
"測量士",
"大学教授",
"調理師",
"電気工事士",
"農家",
"配管工",
"バスガイド",
"花火師",
"漫画家",
"モデル",
"薬剤師",
"YouTuber",
"和紙職人",
]
|
joke2k/faker
|
faker/providers/job/ja_JP/__init__.py
|
Python
|
mit
| 1,635
|
x, y = 999, 999
pals = []
def is_palindrome(num):
strnum = str(num)
for i in range(len(strnum)/2):
if strnum[i]!=strnum[-1-i]:
return False
return True
while x>0:
while y>0:
if is_palindrome(x*y): pals.append(x*y)
y -= 1
x -= 1
y = 999
print 'palindrome is:', max(pals)
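# For 3-digit factors this should print 906609 (= 913 * 993).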
|
davidxmoody/kata
|
project-euler/completed/first-attempt/euler4.py
|
Python
|
mit
| 289
|
"""
CLI program to continually send a morse string.
Usage: test [-h] [-s c,w] <string>
where -h means print this help and stop
-s c,w means set char and word speeds
and <string> is the morse string to repeatedly send
The morse sound is created in a separate thread.
"""
import sys
import os
import getopt
import threading
sys.path.append('..')
from sound_morse import SoundMorse
# get program name from sys.argv
prog_name = sys.argv[0]
if prog_name.endswith('.py'):
prog_name = prog_name[:-3]
def usage(msg=None):
if msg:
print(f'{"*"+80}\n{msg}\n{"*"+80}\n')
print(__doc__)
def send_morse(string, sound_object):
# sound each character in the string
# we do this in this strange way so setting a global from main code will stop the thread
for ch in string:
if StopThread:
return
sound_object.send(ch)
# parse the CLI params
argv = sys.argv[1:]
try:
    (opts, args) = getopt.getopt(argv, 'hs:', ['help', 'speed='])  # long option names take no leading '--'
except getopt.GetoptError as err:
usage(err)
sys.exit(1)
morse_string = ''.join(args)
cwpm = 25
wpm = 15
for (opt, param) in opts:
if opt in ['-h', '--help']:
usage()
sys.exit(0)
elif opt in ['-s', '--speed']:
speeds = param.split(',')
if len(speeds) not in (1, 2):
usage("-s option must be followed by one or two speeds, eg: '-s 20' or '- 10,5'")
cwpm = speeds[0]
wpm = cwpm
if len(speeds) == 2:
(cwpm, wpm) = speeds
cwpm = int(cwpm)
wpm = int(wpm)
morse = SoundMorse()
morse.set_speeds(cwpm=cwpm, wpm=wpm)
StopThread = False
while not StopThread:
for ch in morse_string:
try:
thread = threading.Thread(target=send_morse, args=(ch, morse))
thread.start()
thread.join()
thread = None
except KeyboardInterrupt:
StopThread = True
break
print('Stopping ...')
if thread:
thread.join()
|
rzzzwilson/morse_trainer
|
test.py
|
Python
|
mit
| 2,002
|
#!/usr/bin/env python
"""
Ask a manual question using human strings by referencing the name of a single sensor.
Also supply a sensor filter that limits the column data that is shown to values that contain Windows (which is shorthand for a regex match against .*Windows.*).
Also supply filter options that re-fetches any cached data that is older than 3600 seconds, matches all values supplied in the filter, and ignores case for any value match of the filter.
No sensor parameters, question filters, or question options supplied.
"""
# import the basic python packages we need
import os
import sys
import tempfile
import pprint
import traceback
# disable python from generating a .pyc file
sys.dont_write_bytecode = True
# change me to the path of pytan if this script is not running from EXAMPLES/PYTAN_API
pytan_loc = "~/gh/pytan"
pytan_static_path = os.path.join(os.path.expanduser(pytan_loc), 'lib')
# Determine our script name, script dir
my_file = os.path.abspath(sys.argv[0])
my_dir = os.path.dirname(my_file)
# try to automatically determine the pytan lib directory by assuming it is in '../../lib/'
parent_dir = os.path.dirname(my_dir)
pytan_root_dir = os.path.dirname(parent_dir)
lib_dir = os.path.join(pytan_root_dir, 'lib')
# add pytan_loc and lib_dir to the PYTHONPATH variable
path_adds = [lib_dir, pytan_static_path]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]
# import pytan
import pytan
# create a dictionary of arguments for the pytan handler
handler_args = {}
# establish our connection info for the Tanium Server
handler_args['username'] = "Administrator"
handler_args['password'] = "Tanium2015!"
handler_args['host'] = "10.0.1.240"
handler_args['port'] = "443" # optional
handler_args['trusted_certs'] = "certs"
# optional, level 0 is no output except warnings/errors
# level 1 through 12 are more and more verbose
handler_args['loglevel'] = 1
# optional, use a debug format for the logging output (uses two lines per log entry)
handler_args['debugformat'] = False
# optional, this saves all response objects to handler.session.ALL_REQUESTS_RESPONSES
# very useful for capturing the full exchange of XML requests and responses
handler_args['record_all_requests'] = True
# instantiate a handler using all of the arguments in the handler_args dictionary
print "...CALLING: pytan.handler() with args: {}".format(handler_args)
handler = pytan.Handler(**handler_args)
# print out the handler string
print "...OUTPUT: handler string: {}".format(handler)
# setup the arguments for the handler() class
kwargs = {}
kwargs["sensors"] = u'Operating System, that contains:Windows, opt:match_all_values, opt:ignore_case, opt:max_data_age:3600'
kwargs["qtype"] = u'manual'
print "...CALLING: handler.ask with args: {}".format(kwargs)
response = handler.ask(**kwargs)
print "...OUTPUT: Type of response: ", type(response)
print "...OUTPUT: Pretty print of response:"
print pprint.pformat(response)
print "...OUTPUT: Equivalent Question if it were to be asked in the Tanium Console: "
print response['question_object'].query_text
if response['question_results']:
# call the export_obj() method to convert response to CSV and store it in out
export_kwargs = {}
export_kwargs['obj'] = response['question_results']
export_kwargs['export_format'] = 'csv'
print "...CALLING: handler.export_obj() with args {}".format(export_kwargs)
out = handler.export_obj(**export_kwargs)
# trim the output if it is more than 15 lines long
if len(out.splitlines()) > 15:
out = out.splitlines()[0:15]
out.append('..trimmed for brevity..')
out = '\n'.join(out)
print "...OUTPUT: CSV Results of response: "
print out
'''STDOUT from running this:
...CALLING: pytan.handler() with args: {'username': 'Administrator', 'record_all_requests': True, 'loglevel': 1, 'debugformat': False, 'host': '10.0.1.240', 'password': 'Tanium2015!', 'port': '443'}
...OUTPUT: handler string: PyTan v2.1.4 Handler for Session to 10.0.1.240:443, Authenticated: True, Platform Version: 6.5.314.4301
...CALLING: handler.ask with args: {'sensors': u'Operating System, that contains:Windows, opt:match_all_values, opt:ignore_case, opt:max_data_age:3600', 'qtype': u'manual'}
2015-09-14 20:13:34,606 INFO pytan.pollers.QuestionPoller: ID 808: Reached Threshold of 99% (3 of 3)
...OUTPUT: Type of response: <type 'dict'>
...OUTPUT: Pretty print of response:
{'poller_object': <pytan.pollers.QuestionPoller object at 0x11d6ab590>,
'poller_success': True,
'question_object': <taniumpy.object_types.question.Question object at 0x11d6abe90>,
'question_results': <taniumpy.object_types.result_set.ResultSet object at 0x11d6aa410>}
...OUTPUT: Equivalent Question if it were to be asked in the Tanium Console:
Get Operating System containing "Windows" from all machines
...CALLING: handler.export_obj() with args {'export_format': 'csv', 'obj': <taniumpy.object_types.result_set.ResultSet object at 0x11d6aa410>}
...OUTPUT: CSV Results of response:
Operating System
[no results]
Windows Server 2008 R2 Standard
Windows Server 2012 R2 Standard
'''
'''STDERR from running this:
'''
|
tanium/pytan
|
EXAMPLES/PYTAN_API/ask_manual_question_sensor_with_filter_and_3_options.py
|
Python
|
mit
| 5,155
|
from main import app, db
from main.security import security, user_datastore
from flask.ext.blogging import SQLAStorage, BloggingEngine
# configure the blogging storage for the blog.db
# using multiple databases bound to SQLAlchemy
storage = SQLAStorage(db=db, bind="blog")
# create all the tables
db.create_all()
# start the blogging engine
blogging_engine = BloggingEngine(app, storage)
# how the blogging engine identifies a user
# pulling user from flask-security user data store
@blogging_engine.user_loader
def load_user(userid):
print("load_user:",user_datastore.get_user(userid))
return user_datastore.get_user(userid)
# post configuration
def configure_blogging():
print("configure:",storage)
|
slippers/blogging_security
|
main/blogging/__init__.py
|
Python
|
mit
| 718
|
#!flask/bin/python
from request_logger import app
app.run(host='0.0.0.0')
|
i1caro/request_logger
|
run.py
|
Python
|
mit
| 75
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.views.generic import View, ListView, DetailView
from .models import Living, LivingType
from endless_pagination.views import AjaxListView
class LivingList(AjaxListView):
model = Living
# queryset = Living.objects.filter(is_publish=True)#.order_by('weight')
queryset = Living.objects.all()
# paginate_by = 8
template_name = 'living/living_list.html'
page_template = 'living/living_page.html'
default_filter_param = 'all'
def get_queryset(self):
qs = super(LivingList, self).get_queryset()
dist = self.request.GET.get('dist')
p_type = self.request.GET.get('type')
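        # 'dist' buckets (distance as stored on the model):
        # 1: < 1, 2: 1-10, 3: 10-30, 4: 30-50, 5: 50-100, 6: > 100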
if dist:
if '-' not in dist:
if dist == "1":
qs = qs.filter(distance__lt=1.00)
elif dist == "2":
qs = qs.filter(distance__gt=1.00, distance__lt=10.01)
elif dist == "3":
qs = qs.filter(distance__gt=10.00, distance__lt=30.01)
elif dist == "4":
qs = qs.filter(distance__gt=30.00, distance__lt=50.01)
elif dist == "5":
qs = qs.filter(distance__gt=50.00, distance__lt=100.01)
elif dist == "6":
qs = qs.filter(distance__gt=100.00)
if p_type:
qs = qs.filter(type_of_living__slug=p_type) #
return qs
def get_context_data(self, **kwargs):
context = super(LivingList, self).get_context_data(**kwargs)
# context['p_types'] = PlaceType.objects.all()
dist = self.request.GET.get('dist')
if dist:
if '-' not in dist:
context['dist'] = dist
pt = self.request.GET.get('type', None)
context['type_active'] = pt if pt else 'all'
return context
class LivingDetail(DetailView):
model = Living
template_name = 'living/living_detail.html'
|
Guest007/vgid
|
apps/living/views.py
|
Python
|
mit
| 1,978
|
'''
Lista 3b - Exercício 3
Check whether a positive integer n is prime.
Felipe Nogueira de Souza
Twitter: @_outrofelipe
'''
n = int(input('Informe um número inteiro positivo: '))
i = 2
primo = True
while i <= n - 1:
if n % i == 0:
primo = False
break
i += 1
if primo:
print('O número %d é primo!' %n)
else:
print('O número %d NÃO é primo!' %n)
|
outrofelipe/Python-para-zumbis
|
lista-3b/03_primo.py
|
Python
|
mit
| 386
|
from racks import __version__
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
dependencies = ['docopt', 'termcolor']
def publish():
os.system("python setup.py sdist upload")
if sys.argv[-1] == "publish":
publish()
sys.exit()
setup(
name='racks',
version=".".join(str(x) for x in __version__),
description='racks for stack graphs',
long_description=open('README.rst').read(),
url='http://www.github.com/myusuf3/racks',
license="MIT License",
author='Mahdi Yusuf',
author_email='yusuf.mahdi@gmail.com',
install_requires=dependencies,
packages=['racks', ],
entry_points={
'console_scripts': [
'racks=racks.main:start'
],
},
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
),
)
|
myusuf3/racks
|
setup.py
|
Python
|
mit
| 1,126
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The configs waiters delete command."""
from googlecloudsdk.api_lib.runtime_config import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.runtime_config import flags
from googlecloudsdk.core import log
class Delete(base.DeleteCommand):
"""Delete waiter resources.
This command deletes the waiter resource with the specified name.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To delete a waiter named "my-waiter" within a configuration named
"my-config", run:
$ {command} my-waiter --config-name my-config
""",
}
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
flags.AddConfigFlag(parser)
parser.add_argument('name', help='The waiter name.')
def Collection(self):
"""Returns the default collection path string.
Returns:
The default collection path string.
"""
return 'runtimeconfig.waiters'
def Run(self, args):
"""Run 'runtime-configs waiters delete'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Raises:
HttpException: An http error response was received while executing api
request.
"""
waiter_client = util.WaiterClient()
messages = util.Messages()
waiter_resource = util.ParseWaiterName(args.name, args)
waiter_client.Delete(
messages.RuntimeconfigProjectsConfigsWaitersDeleteRequest(
name=waiter_resource.RelativeName(),
)
)
log.DeletedResource(waiter_resource)
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/runtime_config/configs/waiters/delete.py
|
Python
|
mit
| 2,419
|
""" Statusdb module"""
__version__ = "1.0.0"
|
SciLifeLab/statusdb
|
statusdb/__init__.py
|
Python
|
mit
| 45
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.md')) as f:
CHANGES = f.read()
requires = [
'arrow',
'celery',
'psycopg2>=2.7.0', # register_ipaddress
'pyramid_jinja2',
'pyramid_tm',
'waitress',
'zope.sqlalchemy',
]
setup(name='wanmap',
version='0.0',
description='A distributed nmap web application',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Brad Walker',
author_email='brad@bradmwalker.com',
url='https://wanmap.org',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="wanmap",
entry_points="""\
[paste.app_factory]
main = wanmap:main
[console_scripts]
initialize_wanmap_db = wanmap.scripts.initializedb:main
""",
)
|
bradmwalker/wanmap
|
setup.py
|
Python
|
mit
| 1,279
|
"""
Created on 2013-1-19
@author: Administrator
"""
import urllib.request
import smtplib
for line in urllib.request.urlopen('http://www.baidu.com'):
line = line.decode('gb2312')
print(line)
server = smtplib.SMTP('localhost')
server.sendmail('quchunguang@example.org', 'quchunguang@gmail.com',
"""To: quchunguang@example.org
From: quchunguang@gmail.com
Beware the Ides of March.
""")
server.quit()
|
quchunguang/test
|
testpy3/testinternet.py
|
Python
|
mit
| 408
|
#!python
# encoding: utf-8
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Code Intelligence: utility functions"""
import bisect
import os
from os.path import basename
import sys
import re
import stat
import textwrap
import logging
import types
from pprint import pprint, pformat
import time
import codecs
# Global dict for holding specific hotshot profilers
hotshotProfilers = {}
#---- general stuff
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
# A "safe" language name for the given language where safe generally
# means safe for a file path.
_safe_lang_from_lang_cache = {
"C++": "cpp",
}
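# e.g. safe_lang_from_lang("C++") -> "cpp"; uncached names are lower-cased with
# spaces replaced by underscores.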
def safe_lang_from_lang(lang):
global _safe_lang_from_lang_cache
try:
return _safe_lang_from_lang_cache[lang]
except KeyError:
safe_lang = lang.lower().replace(' ', '_')
_safe_lang_from_lang_cache[lang] = safe_lang
return safe_lang
# @deprecated: Manager.buf_from_path now uses textinfo to guess lang.
def guess_lang_from_path(path):
lang_from_ext = {
".py": "Python",
".pl": "Perl",
".pm": "Perl",
".tcl": "Tcl",
".php": "PHP",
".inc": "PHP",
".rb": "Ruby",
".rhtml": "RHTML",
".html.erb": "RHTML",
".js": "JavaScript",
".java": "Java",
".css": "CSS",
".xul": "XUL",
".xbl": "XBL",
".html": "HTML",
".xml": "XML",
".tpl": "Smarty",
".django.html": "Django",
".mason.html": "Mason",
".ttkt.html": "TemplateToolkit",
".cxx": "C++",
}
idx = 0
base = basename(path)
while base.find('.', idx) != -1:
idx = base.find('.', idx)
if idx == -1:
break
ext = base[idx:]
if ext in lang_from_ext:
return lang_from_ext[ext]
idx += 1
from codeintel2.common import CodeIntelError
raise CodeIntelError("couldn't guess lang for `%s'" % path)
def gen_dirs_under_dirs(dirs, max_depth, interesting_file_patterns=None,
skip_scc_control_dirs=True):
"""Generate all dirs under the given dirs (including the given dirs
themselves).
"max_depth" is an integer maximum number of sub-directories that
this method with recurse.
"file_patterns", if given, is a sequence of glob patterns for
"interesting" files. Directories with no interesting files are
not included (though sub-directories of these may be).
"skip_scc_control_dirs" is a boolean (default True) indicating if
svn and cvs control dirs should be skipped.
"""
from os.path import normpath, abspath, expanduser
from fnmatch import fnmatch
dirs_to_skip = (skip_scc_control_dirs
and ["CVS", ".svn", ".hg", ".git", ".bzr"] or [])
# We must keep track of the directories we have walked, as the list of dirs
# can overlap - bug 90289.
walked_these_dirs = {}
for dir in dirs:
norm_dir = normpath(abspath(expanduser(dir)))
LEN_DIR = len(norm_dir)
for dirpath, dirnames, filenames in walk2(norm_dir):
if dirpath in walked_these_dirs:
dirnames[:] = [] # Already walked - no need to do it again.
continue
if dirpath[LEN_DIR:].count(os.sep) >= max_depth:
dirnames[:] = [] # hit max_depth
else:
walked_these_dirs[dirpath] = True
for dir_to_skip in dirs_to_skip:
if dir_to_skip in dirnames:
dirnames.remove(dir_to_skip)
if interesting_file_patterns:
for pat, filename in (
(p, f) for p in interesting_file_patterns
for f in filenames):
if fnmatch(filename, pat):
break
else:
# No interesting files in this dir.
continue
yield dirpath
#---- standard module/class/function doc parsing
LINE_LIMIT = 5 # limit full number of lines this number
LINE_WIDTH = 60 # wrap doc summaries to this width
# Examples matches to this pattern:
# foo(args)
# foo(args) -> retval
# foo(args) -- description
# retval = foo(args)
# retval = foo(args) -- description
_gPySigLinePat = re.compile(
r"^((?P<retval>[^=]+?)\s*=|class)?\s*(?P<head>[\w\.]+\s?\(.*?\))\s*(?P<sep>[:<>=-]*)\s*(?P<tail>.*)$")
_gSentenceSepPat = re.compile(r"(?<=\.)\s+", re.M) # split on sentence bndry
def parseDocSummary(doclines, limit=LINE_LIMIT, width=LINE_WIDTH):
"""Parse out a short summary from the given doclines.
"doclines" is a list of lines (without trailing newlines) to parse.
"limit" is the number of lines to which to limit the summary.
The "short summary" is the first sentence limited by (1) the "limit"
number of lines and (2) one paragraph. If the first *two* sentences fit
on the first line, then use both. Returns a list of summary lines.
"""
# Skip blank lines.
start = 0
while start < len(doclines):
if doclines[start].strip():
break
start += 1
desclines = []
for i in range(start, len(doclines)):
if len(desclines) >= limit:
break
stripped = doclines[i].strip()
if not stripped:
break
sentences = _gSentenceSepPat.split(stripped)
if sentences and not sentences[-1].endswith('.'):
del sentences[-1] # last bit might not be a complete sentence
if not sentences:
desclines.append(stripped + ' ')
continue
elif i == start and len(sentences) > 1:
desclines.append(' '.join([s.strip() for s in sentences[:2]]))
else:
desclines.append(sentences[0].strip())
break
if desclines:
if desclines[-1][-1] == ' ':
# If terminated at non-sentence boundary then have extraneous
# trailing space.
desclines[-1] = desclines[-1][:-1]
desclines = textwrap.wrap(''.join(desclines), width)
return desclines
def parsePyFuncDoc(doc, fallbackCallSig=None, scope="?", funcname="?"):
"""Parse the given Python function/method doc-string into call-signature
and description bits.
"doc" is the function doc string.
"fallbackCallSig" (optional) is a list of call signature lines to
fallback to if one cannot be determined from the doc string.
"scope" (optional) is the module/class parent scope name. This
is just used for better error/log reporting.
"funcname" (optional) is the function name. This is just used for
better error/log reporting.
Examples of doc strings with call-signature info:
close(): explicitly release resources held.
x.__repr__() <==> repr(x)
read([s]) -- Read s characters, or the rest of the string
recv(buffersize[, flags]) -> data
replace (str, old, new[, maxsplit]) -> string
class StringIO([buffer])
Returns a 2-tuple: (<call-signature-lines>, <description-lines>)
"""
if doc is None or not doc.strip():
return ([], [])
limit = LINE_LIMIT
if not isinstance(doc, unicode):
# try to convert from utf8 to unicode; if we fail, too bad.
try:
doc = codecs.utf_8_decode(doc)[0]
except UnicodeDecodeError:
pass
doclines = doc.splitlines(0)
index = 0
siglines = []
desclines = []
# Skip leading blank lines.
while index < len(doclines):
if doclines[index].strip():
break
index += 1
# Parse out the call signature block, if it looks like there is one.
if index >= len(doclines):
match = None
else:
first = doclines[index].strip()
match = _gPySigLinePat.match(first)
if match:
# The 'doc' looks like it starts with a call signature block.
for i, line in enumerate(doclines[index:]):
if len(siglines) >= limit:
index = i
break
stripped = line.strip()
if not stripped:
index = i
break
match = _gPySigLinePat.match(stripped)
if not match:
index = i
break
# Now parse off what may be description content on the same line.
# ":", "-" or "--" separator: tail is description
# "-->" or "->" separator: tail if part of call sig
# "<==>" separator: tail if part of call sig
# other separtor: leave as part of call sig for now
descSeps = ("-", "--", ":")
groupd = match.groupdict()
retval, head, sep, tail = (
groupd.get("retval"), groupd.get("head"),
groupd.get("sep"), groupd.get("tail"))
if retval:
siglines.append(head + " -> " + retval)
if tail and sep in descSeps:
desclines.append(tail)
elif tail and sep in descSeps:
siglines.append(head)
desclines.append(tail)
else:
siglines.append(stripped)
else:
index = len(doclines)
if not siglines and fallbackCallSig:
siglines = fallbackCallSig
# Parse out the description block.
if desclines:
# Use what we have already. Just need to wrap it.
desclines = textwrap.wrap(' '.join(desclines), LINE_WIDTH)
else:
doclines = doclines[index:]
# strip leading empty lines
while len(doclines) > 0 and not doclines[0].rstrip():
del doclines[0]
try:
skip_first_line = (doclines[0][0] not in (" \t"))
except IndexError:
skip_first_line = False # no lines, or first line is empty
desclines = dedent("\n".join(
doclines), skip_first_line=skip_first_line)
desclines = desclines.splitlines(0)
## debug logging
# f = open("parsePyFuncDoc.log", "a")
# if 0:
# f.write("\n---- %s:\n" % funcname)
# f.write(pformat(siglines)+"\n")
# f.write(pformat(desclines)+"\n")
# else:
# f.write("\n")
# if siglines:
# f.write("\n".join(siglines)+"\n")
# else:
# f.write("<no signature for '%s.%s'>\n" % (scope, funcname))
# for descline in desclines:
# f.write("\t%s\n" % descline)
# f.close()
return (siglines, desclines)
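# --- Hedged usage sketch (added for illustration; not part of the original file).
# Assumes the module-level LINE_LIMIT/LINE_WIDTH constants and the
# _gPySigLinePat pattern defined earlier in this file; the docstring is made up.
def _demo_parsePyFuncDoc():
    doc = "recv(buffersize[, flags]) -> data\n\nReceive up to buffersize bytes."
    siglines, desclines = parsePyFuncDoc(doc, scope="socket", funcname="recv")
    return siglines, desclines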
#---- debugging utilities
def unmark_text(markedup_text):
u"""Parse text with potential markup as follows and return
(<text>, <data-dict>).
"<|>" indicates the current position (pos), defaults to the end
of the text.
"<+>" indicates the trigger position (trg_pos), if present.
"<$>" indicates the start position (start_pos) for some kind of
of processing, if present.
"<N>" is a numbered marker. N can be any of 0-99. These positions
        are returned as the associated number key in <data-dict>.
Note that the positions are in UTF-8 byte counts, not character counts.
This matches the behaviour of Scintilla positions.
E.g.:
>>> unmark_text('foo.<|>')
('foo.', {'pos': 4})
>>> unmark_text('foo.<|><+>')
('foo.', {'trg_pos': 4, 'pos': 4})
>>> unmark_text('foo.<+>ba<|>')
('foo.ba', {'trg_pos': 4, 'pos': 6})
>>> unmark_text('fo<|>o.<+>ba')
('foo.ba', {'trg_pos': 4, 'pos': 2})
>>> unmark_text('os.path.join<$>(<|>')
('os.path.join(', {'pos': 13, 'start_pos': 12})
>>> unmark_text('abc<3>defghi<2>jk<4>lm<1>nopqrstuvwxyz')
('abcdefghijklmnopqrstuvwxyz', {1: 13, 2: 9, 3: 3, 4: 11, 'pos': 26})
>>> unmark_text('Ůɳíčóďé<|>')
('Ůɳíčóďé', {'pos': 14})
See the matching markup_text() below.
"""
splitter = re.compile(r"(<(?:[\|\+\$\[\]<]|\d+)>)")
text = u"" if isinstance(markup_text, unicode) else ""
data = {}
posNameFromSymbol = {
"<|>": "pos",
"<+>": "trg_pos",
"<$>": "start_pos",
"<[>": "start_selection",
"<]>": "end_selection",
}
def byte_length(text):
if isinstance(text, unicode):
return len(text.encode("utf-8"))
return len(text)
bracketed_digits_re = re.compile(r'<\d+>$')
for token in splitter.split(markedup_text):
if token in posNameFromSymbol:
data[posNameFromSymbol[token]] = byte_length(text)
elif token == "<<>": # escape sequence
text += "<"
elif bracketed_digits_re.match(token):
data[int(token[1:-1])] = byte_length(text)
else:
text += token
if "pos" not in data:
data["pos"] = byte_length(text)
# sys.stderr.write(">> text:%r, data:%s\n" % (text, data))
return text, data
def markup_text(text, pos=None, trg_pos=None, start_pos=None):
"""Markup text with position markers.
See the matching unmark_text() above.
"""
positions_and_markers = []
if pos is not None:
positions_and_markers.append((pos, '<|>'))
if trg_pos is not None:
positions_and_markers.append((trg_pos, '<+>'))
if start_pos is not None:
positions_and_markers.append((start_pos, '<$>'))
positions_and_markers.sort()
if not isinstance(text, bytes):
text = text.encode("utf-8")
m_text = ""
m_pos = 0
for position, marker in positions_and_markers:
m_text += text[m_pos:position].decode('utf-8', 'ignore') + marker
m_pos = position
m_text += text[m_pos:].decode('utf-8', 'ignore')
return m_text
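# --- Hedged usage sketch (added for illustration; not part of the original file).
# Round trip: unmark_text() strips the markers and records their byte offsets;
# markup_text() re-inserts the "<|>" marker at the recorded position.
def _demo_markup_roundtrip():
    text, data = unmark_text("os.path.<|>")
    return markup_text(text, pos=data["pos"])  # -> "os.path.<|>"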
def lines_from_pos(unmarked_text, positions):
"""Get 1-based line numbers from positions
@param unmarked_text {str} The text to examine
@param positions {dict or list of int} Byte positions to look up
@returns {dict or list of int} Matching line numbers (1-based)
Given some text and either a list of positions, or a dict containing
positions as values, return a matching data structure with positions
replaced with the line number of the lines the positions are on. Positions
after the last line are assumed to be on a hypothetical line.
E.g.:
Assuming the following text with \n line endings, where each line is
exactly 20 characters long:
>>> text = '''
... line one
... line two
... line three
... '''.lstrip()
>>> lines_from_pos(text, [5, 15, 25, 55, 999])
[1, 1, 2, 3, 4]
>>> lines_from_pos(text, {"hello": 10, "moo": 20, "not": "an int"})
{'moo': 1, 'hello': 1}
"""
lines = unicode(unmarked_text).splitlines(True)
offsets = [0]
for line in lines:
offsets.append(offsets[-1] + len(line.encode("utf-8")))
try:
# assume a dict
keys = positions.iterkeys()
values = {}
except AttributeError:
# assume a list/tuple
keys = range(len(positions))
values = []
for key in keys:
try:
position = positions[key] - 0
except TypeError:
continue # not a number
line_no = bisect.bisect_left(offsets, position)
try:
values[key] = line_no
except IndexError:
if key == len(values):
values.append(line_no)
else:
raise
return values
# Recipe: banner (1.0.1) in C:\trentm\tm\recipes\cookbook
def banner(text, ch='=', length=78):
"""Return a banner line centering the given text.
"text" is the text to show in the banner. None can be given to have
no text.
"ch" (optional, default '=') is the banner line character (can
also be a short string to repeat).
"length" (optional, default 78) is the length of banner to make.
Examples:
>>> banner("Peggy Sue")
'================================= Peggy Sue =================================='
>>> banner("Peggy Sue", ch='-', length=50)
'------------------- Peggy Sue --------------------'
>>> banner("Pretty pretty pretty pretty Peggy Sue", length=40)
'Pretty pretty pretty pretty Peggy Sue'
"""
if text is None:
return ch * length
elif len(text) + 2 + len(ch)*2 > length:
# Not enough space for even one line char (plus space) around text.
return text
else:
remain = length - (len(text) + 2)
prefix_len = remain / 2
suffix_len = remain - prefix_len
if len(ch) == 1:
prefix = ch * prefix_len
suffix = ch * suffix_len
else:
prefix = ch * (prefix_len/len(ch)) + ch[:prefix_len % len(ch)]
suffix = ch * (suffix_len/len(ch)) + ch[:suffix_len % len(ch)]
return prefix + ' ' + text + ' ' + suffix
# Recipe: dedent (0.1.2) in C:\trentm\tm\recipes\cookbook
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line)
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line:
continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG:
print "dedent: indent=%d: %r" % (indent, line)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG:
print "dedent: margin=%r" % margin
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line:
continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG:
print "dedent: %r: EOL -> strip up to EOL" % line
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print "dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin)
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def dedent(text, tabsize=8, skip_first_line=False):
"""dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
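# --- Hedged usage sketch (added for illustration; not part of the original file).
# Unlike textwrap.dedent(), dedent() above keeps hard tabs and removes only the
# common margin, using tab-width-aware indent calculations.
def _demo_dedent():
    text = "    def f():\n\treturn 1\n"
    return dedent(text, tabsize=4)  # -> "def f():\nreturn 1\n"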
# Recipe: indent (0.2.1) in C:\trentm\tm\recipes\cookbook
def indent(s, width=4, skip_first_line=False):
"""indent(s, [width=4]) -> 's' indented by 'width' spaces
The optional "skip_first_line" argument is a boolean (default False)
indicating if the first line should NOT be indented.
"""
lines = s.splitlines(1)
indentstr = ' '*width
if skip_first_line:
return indentstr.join(lines)
else:
return indentstr + indentstr.join(lines)
def walk2(top, topdown=True, onerror=None, followlinks=False,
ondecodeerror=None):
"""A version of `os.walk` that adds support for handling errors for
files that cannot be decoded with the default encoding. (See bug 82268.)
By default `UnicodeDecodeError`s from the os.listdir() call are
ignored. If optional arg 'ondecodeerror' is specified, it should be a
function; it will be called with one argument, the `UnicodeDecodeError`
instance. It can report the error to continue with the walk, or
raise the exception to abort the walk.
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = os.listdir(top)
except os.error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
try:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
except UnicodeDecodeError, err:
if ondecodeerror is not None:
ondecodeerror(err)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
            for x in walk2(path, topdown, onerror, followlinks, ondecodeerror):
yield x
if not topdown:
yield top, dirs, nondirs
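# --- Hedged usage sketch (added for illustration; not part of the original file).
# Collect, rather than raise on, directory entries that cannot be decoded while
# walking; the starting directory is an arbitrary example.
def _demo_walk2(top="."):
    decode_errors = []
    for dirpath, dirnames, filenames in walk2(top, ondecodeerror=decode_errors.append):
        pass
    return decode_errors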
# Decorators useful for timing and profiling specific functions.
#
# timeit usage:
# Decorate the desired function and you'll get a print for how long
# each call to the function took.
#
# hotspotit usage:
# 1. decorate the desired function
# 2. run your code
# 3. run:
# python .../codeintel/support/show_stats.py .../<funcname>.prof
#
def timeit(func):
clock = (sys.platform == "win32" and time.clock or time.time)
def wrapper(*args, **kw):
start_time = clock()
try:
return func(*args, **kw)
finally:
total_time = clock() - start_time
print "%s took %.3fs" % (func.func_name, total_time)
return wrapper
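# --- Hedged usage sketch (added for illustration; not part of the original file).
# Decorating a function with @timeit prints its wall-clock duration per call;
# the demo function below is made up.
@timeit
def _demo_slow_sum(n=1000000):
    return sum(range(n))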
def hotshotit(func):
def wrapper(*args, **kw):
import hotshot
global hotshotProfilers
prof_name = func.func_name+".prof"
profiler = hotshotProfilers.get(prof_name)
if profiler is None:
profiler = hotshot.Profile(prof_name)
hotshotProfilers[prof_name] = profiler
return profiler.runcall(func, *args, **kw)
return wrapper
_koCProfiler = None
def getProfiler():
global _koCProfiler
if _koCProfiler is None:
class _KoCProfileManager(object):
def __init__(self):
import atexit
import cProfile
from codeintel2.common import _xpcom_
self.prof = cProfile.Profile()
if _xpcom_:
from xpcom import components
_KoCProfileManager._com_interfaces_ = [
components.interfaces.nsIObserver]
obsSvc = components.classes["@mozilla.org/observer-service;1"].\
getService(
components.interfaces.nsIObserverService)
obsSvc.addObserver(self, 'xpcom-shutdown', False)
else:
atexit.register(self.atexit_handler)
def atexit_handler(self):
self.prof.print_stats(sort="time")
def observe(self, subject, topic, data):
if topic == "xpcom-shutdown":
self.atexit_handler()
_koCProfiler = _KoCProfileManager()
return _koCProfiler.prof
def profile_method(func):
def wrapper(*args, **kw):
return getProfiler().runcall(func, *args, **kw)
return wrapper
# Utility functions to perform sorting the same way as scintilla does it
# for the code-completion list.
def OrdPunctLast(value):
result = []
value = value.upper()
for ch in value:
i = ord(ch)
if i >= 0x21 and i <= 0x2F: # ch >= '!' && ch <= '/'
result.append(chr(i - ord("!") + ord('['))) # ch - '!' + '['
elif i >= 0x3A and i <= 0x40: # ch >= ':' && ch <= '@'
result.append(chr(i - ord(":") + ord('['))) # ch - ':' + '['
else:
result.append(ch)
return "".join(result)
def CompareNPunctLast(value1, value2):
# value 1 is smaller, return negative
# value 1 is equal, return 0
# value 1 is larger, return positive
return cmp(OrdPunctLast(value1), OrdPunctLast(value2))
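# --- Hedged usage sketch (added for illustration; not part of the original file).
# Using OrdPunctLast() as a sort key makes punctuation-prefixed names (e.g.
# "__init__", "_private") sort after plain identifiers, matching scintilla's
# completion-list ordering; the names below are made up.
def _demo_completion_sort():
    names = ["__init__", "append", "_private", "count"]
    return sorted(names, key=OrdPunctLast)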
# Utility function to make a lookup dictionary
def make_short_name_dict(names, length=3):
outdict = {}
for name in names:
if len(name) >= length:
shortname = name[:length]
l = outdict.get(shortname)
if not l:
outdict[shortname] = [name]
else:
l.append(name)
# pprint(outdict)
for values in outdict.values():
values.sort(CompareNPunctLast)
return outdict
def makePerformantLogger(logger):
"""Replaces the info() and debug() methods with dummy methods.
Assumes that the logging level does not change during runtime.
"""
    def _log_ignore(self, *args, **kwargs):
        pass
    if not logger.isEnabledFor(logging.INFO):
        logger.info = _log_ignore
    if not logger.isEnabledFor(logging.DEBUG):
        logger.debug = _log_ignore
#---- mainline self-test
if __name__ == "__main__":
import doctest
doctest.testmod()
|
herove/dotfiles
|
sublime/Packages/SublimeCodeIntel/libs/codeintel2/util.py
|
Python
|
mit
| 28,763
|
"""Main Fabric deployment file for CloudBioLinux distribution.
This installs a standard set of useful biological applications on a remote
server. It is designed for bootstrapping a machine from scratch, as with new
Amazon EC2 instances.
Usage:
fab -H hostname -i private_key_file install_biolinux
which will call into the 'install_biolinux' method below. See the README for
more examples. hostname can be a named host in ~/.ssh/config
Requires:
Fabric http://docs.fabfile.org
PyYAML http://pyyaml.org/wiki/PyYAMLDocumentation
"""
import os
import sys
from datetime import datetime
from fabric.api import *
from fabric.contrib.files import *
import yaml
# use local cloudbio directory
for to_remove in [p for p in sys.path if p.find("cloudbiolinux-") > 0]:
sys.path.remove(to_remove)
sys.path.append(os.path.dirname(__file__))
import cloudbio
from cloudbio import libraries
from cloudbio.utils import _setup_logging, _configure_fabric_environment
from cloudbio.cloudman import _cleanup_ec2, _configure_cloudman
from cloudbio.cloudbiolinux import _cleanup_space, _freenx_scripts
from cloudbio.custom import shared
from cloudbio.package.shared import _yaml_to_packages
from cloudbio.package import brew, conda
from cloudbio.package import (_configure_and_install_native_packages,
_connect_native_packages, _print_shell_exports)
from cloudbio.package.nix import _setup_nix_sources, _nix_packages
from cloudbio.flavor.config import get_config_file
from cloudbio.config_management.puppet import _puppet_provision
from cloudbio.config_management.chef import _chef_provision, chef, _configure_chef
# ### Shared installation targets for all platforms
def install_biolinux(target=None, flavor=None):
"""Main entry point for installing BioLinux on a remote server.
`flavor` allows customization of CloudBioLinux behavior. It can either
be a flavor name that maps to a corresponding directory in contrib/flavor
or the path to a custom directory. This can contain:
- alternative package lists (main.yaml, packages.yaml, custom.yaml)
- custom python code (nameflavor.py) that hooks into the build machinery
`target` allows running only particular parts of the build process. Valid choices are:
- packages Install distro packages
- custom Install custom packages
- chef_recipes Provision chef recipes
- libraries Install programming language libraries
- post_install Setup CloudMan, FreeNX and other system services
- cleanup Remove downloaded files and prepare images for AMI builds
"""
_setup_logging(env)
time_start = _print_time_stats("Config", "start")
_check_fabric_version()
if env.ssh_config_path and os.path.isfile(os.path.expanduser(env.ssh_config_path)):
env.use_ssh_config = True
_configure_fabric_environment(env, flavor,
ignore_distcheck=(target is not None
and target in ["libraries", "custom"]))
env.logger.debug("Target is '%s'" % target)
env.logger.debug("Flavor is '%s'" % flavor)
_perform_install(target, flavor)
_print_time_stats("Config", "end", time_start)
if hasattr(env, "keep_isolated") and env.keep_isolated:
_print_shell_exports(env)
def _perform_install(target=None, flavor=None, more_custom_add=None):
"""
Once CBL/fabric environment is setup, this method actually
runs the required installation procedures.
See `install_biolinux` for full details on arguments
`target` and `flavor`.
"""
pkg_install, lib_install, custom_ignore, custom_add = _read_main_config()
if more_custom_add:
if custom_add is None:
custom_add = {}
for k, vs in more_custom_add.iteritems():
if k in custom_add:
custom_add[k].extend(vs)
else:
custom_add[k] = vs
if target is None or target == "packages":
env.keep_isolated = getattr(env, "keep_isolated", "false").lower() in ["true", "yes"]
# Only touch system information if we're not an isolated installation
if not env.keep_isolated:
# can only install native packages if we have sudo access or are root
if env.use_sudo or env.safe_run_output("whoami").strip() == "root":
_configure_and_install_native_packages(env, pkg_install)
else:
_connect_native_packages(env, pkg_install, lib_install)
if env.nixpkgs: # ./doc/nixpkgs.md
_setup_nix_sources()
_nix_packages(pkg_install)
if target is None or target == "custom":
_custom_installs(pkg_install, custom_ignore, custom_add)
if target is None or target == "chef_recipes":
_provision_chef_recipes(pkg_install, custom_ignore)
if target is None or target == "puppet_classes":
_provision_puppet_classes(pkg_install, custom_ignore)
if target is None or target == "brew":
install_brew(flavor=flavor, automated=True)
if target is None or target == "conda":
install_conda(flavor=flavor, automated=True)
if target is None or target == "libraries":
_do_library_installs(lib_install)
if target is None or target == "post_install":
env.flavor.post_install()
if "is_ec2_image" in env and env.is_ec2_image.upper() in ["TRUE", "YES"]:
_freenx_scripts(env)
if pkg_install is not None and 'cloudman' in pkg_install:
_configure_cloudman(env)
if target is None or target == "cleanup":
if env.use_sudo:
_cleanup_space(env)
if "is_ec2_image" in env and env.is_ec2_image.upper() in ["TRUE", "YES"]:
_cleanup_ec2(env)
def _print_time_stats(action, event, prev_time=None):
""" A convenience method for displaying time event during configuration.
:type action: string
:param action: Indicates type of action (eg, Config, Lib install, Pkg install)
:type event: string
:param event: The monitoring event (eg, start, stop)
:type prev_time: datetime
:param prev_time: A timeststamp of a previous event. If provided, duration between
the time the method is called and the time stamp is included in
the printout
:rtype: datetime
:return: A datetime timestamp of when the method was called
"""
time = datetime.utcnow()
s = "{0} {1} time: {2}".format(action, event, time)
if prev_time: s += "; duration: {0}".format(str(time-prev_time))
env.logger.info(s)
return time
def _check_fabric_version():
"""Checks for fabric version installed
"""
version = env.version
if int(version.split(".")[0]) < 1:
raise NotImplementedError("Please install fabric version 1 or higher")
def _custom_installs(to_install, ignore=None, add=None):
    if env.local_install and not env.safe_exists(env.local_install):
env.safe_run("mkdir -p %s" % env.local_install)
pkg_config = get_config_file(env, "custom.yaml").base
packages, pkg_to_group = _yaml_to_packages(pkg_config, to_install)
packages = [p for p in packages if ignore is None or p not in ignore]
if add is not None:
for key, vals in add.iteritems():
for v in vals:
pkg_to_group[v] = key
packages.append(v)
for p in env.flavor.rewrite_config_items("custom", packages):
install_custom(p, True, pkg_to_group)
def _provision_chef_recipes(to_install, ignore=None):
"""
Much like _custom_installs, read config file, determine what to install,
and install it.
"""
pkg_config = get_config_file(env, "chef_recipes.yaml").base
packages, _ = _yaml_to_packages(pkg_config, to_install)
packages = [p for p in packages if ignore is None or p not in ignore]
recipes = [recipe for recipe in env.flavor.rewrite_config_items("chef_recipes", packages)]
if recipes: # Don't bother running chef if nothing to configure
install_chef_recipe(recipes, True)
def _provision_puppet_classes(to_install, ignore=None):
"""
Much like _custom_installs, read config file, determine what to install,
and install it.
"""
pkg_config = get_config_file(env, "puppet_classes.yaml").base
packages, _ = _yaml_to_packages(pkg_config, to_install)
packages = [p for p in packages if ignore is None or p not in ignore]
classes = [recipe for recipe in env.flavor.rewrite_config_items("puppet_classes", packages)]
if classes: # Don't bother running chef if nothing to configure
install_puppet_class(classes, True)
def install_chef_recipe(recipe, automated=False, flavor=None):
"""Install one or more chef recipes by name.
Usage: fab [-i key] [-u user] -H host install_chef_recipe:recipe
:type recipe: string or list
:param recipe: TODO
:type automated: bool
:param automated: If set to True, the environment is not loaded.
"""
_setup_logging(env)
if not automated:
_configure_fabric_environment(env, flavor)
time_start = _print_time_stats("Chef provision for recipe(s) '{0}'".format(recipe), "start")
_configure_chef(env, chef)
recipes = recipe if isinstance(recipe, list) else [recipe]
for recipe_to_add in recipes:
chef.add_recipe(recipe_to_add)
_chef_provision(env, recipes)
_print_time_stats("Chef provision for recipe(s) '%s'" % recipe, "end", time_start)
def install_puppet_class(classes, automated=False, flavor=None):
"""Install one or more puppet classes by name.
Usage: fab [-i key] [-u user] -H host install_puppet_class:class
:type classes: string or list
:param classes: TODO
:type automated: bool
:param automated: If set to True, the environment is not loaded.
"""
_setup_logging(env)
if not automated:
_configure_fabric_environment(env, flavor)
time_start = _print_time_stats("Puppet provision for class(es) '{0}'".format(classes), "start")
classes = classes if isinstance(classes, list) else [classes]
_puppet_provision(env, classes)
_print_time_stats("Puppet provision for classes(s) '%s'" % classes, "end", time_start)
def install_custom(p, automated=False, pkg_to_group=None, flavor=None):
"""
Install a single custom program or package by name.
This method fetches program name from ``config/custom.yaml`` and delegates
to a method in ``custom/*name*.py`` to proceed with the installation.
Alternatively, if a program install method is defined in the appropriate
package, it will be called directly (see param ``p``).
Usage: fab [-i key] [-u user] -H host install_custom:program_name
:type p: string
:param p: A name of the custom program to install. This has to be either a name
that is listed in ``custom.yaml`` as a subordinate to a group name or a
program name whose install method is defined in either ``cloudbio`` or
``custom`` packages
(e.g., ``cloudbio/custom/cloudman.py -> install_cloudman``).
:type automated: bool
:param automated: If set to True, the environment is not loaded and reading of
the ``custom.yaml`` is skipped.
"""
p = p.lower() # All packages listed in custom.yaml are in lower case
if not automated:
_setup_logging(env)
_configure_fabric_environment(env, flavor, ignore_distcheck=True)
pkg_config = get_config_file(env, "custom.yaml").base
packages, pkg_to_group = _yaml_to_packages(pkg_config, None)
time_start = _print_time_stats("Custom install for '{0}'".format(p), "start")
fn = _custom_install_function(env, p, pkg_to_group)
fn(env)
## TODO: Replace the previous 4 lines with the following one, barring
## objections. Slightly different behavior because pkg_to_group will be
## loaded regardless of automated if it is None, but IMO this shouldn't
## matter because the following steps look like they would fail if
## automated is True and pkg_to_group is None.
# _install_custom(p, pkg_to_group)
_print_time_stats("Custom install for '%s'" % p, "end", time_start)
def _install_custom(p, pkg_to_group=None):
if pkg_to_group is None:
pkg_config = get_config_file(env, "custom.yaml").base
packages, pkg_to_group = _yaml_to_packages(pkg_config, None)
fn = _custom_install_function(env, p, pkg_to_group)
fn(env)
def install_brew(p=None, version=None, flavor=None, automated=False):
"""Top level access to homebrew/linuxbrew packages.
p is a package name to install, or all configured packages if not specified.
"""
if not automated:
_setup_logging(env)
_configure_fabric_environment(env, flavor, ignore_distcheck=True)
if p is not None:
if version:
p = "%s==%s" % (p, version)
brew.install_packages(env, packages=[p])
else:
pkg_install = _read_main_config()[0]
brew.install_packages(env, to_install=pkg_install)
def install_conda(p=None, flavor=None, automated=False):
if not automated:
_setup_logging(env)
_configure_fabric_environment(env, flavor, ignore_distcheck=True)
if p is not None:
conda.install_packages(env, packages=[p])
else:
pkg_install = _read_main_config()[0]
conda.install_packages(env, to_install=pkg_install)
def _custom_install_function(env, p, pkg_to_group):
"""
Find custom install function to execute based on package name to
pkg_to_group dict.
"""
try:
# Allow direct calling of a program install method, even if the program
# is not listed in the custom list (ie, not contained as a key value in
# pkg_to_group). For an example, see 'install_cloudman' or use p=cloudman.
mod_name = pkg_to_group[p] if p in pkg_to_group else p
env.logger.debug("Importing module cloudbio.custom.%s" % mod_name)
mod = __import__("cloudbio.custom.%s" % mod_name,
fromlist=["cloudbio", "custom"])
except ImportError:
raise ImportError("Need to write module cloudbio.custom.%s" %
pkg_to_group[p])
replace_chars = ["-"]
try:
for to_replace in replace_chars:
p = p.replace(to_replace, "_")
env.logger.debug("Looking for custom install function %s.install_%s"
% (mod.__name__, p))
fn = getattr(mod, "install_%s" % p)
except AttributeError:
raise ImportError("Need to write a install_%s function in custom.%s"
% (p, pkg_to_group[p]))
return fn
def _read_main_config():
"""Pull a list of groups to install based on our main configuration YAML.
Reads 'main.yaml' and returns packages and libraries
"""
yaml_file = get_config_file(env, "main.yaml").base
with open(yaml_file) as in_handle:
full_data = yaml.safe_load(in_handle)
packages = full_data.get('packages', [])
packages = env.flavor.rewrite_config_items("main_packages", packages)
libraries = full_data.get('libraries', [])
custom_ignore = full_data.get('custom_ignore', [])
custom_add = full_data.get("custom_additional")
if packages is None: packages = []
if libraries is None: libraries = []
if custom_ignore is None: custom_ignore = []
env.logger.info("Meta-package information from {2}\n- Packages: {0}\n- Libraries: "
"{1}".format(",".join(packages), ",".join(libraries), yaml_file))
return packages, sorted(libraries), custom_ignore, custom_add
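# --- Hedged sketch (added for illustration; not part of the original fabfile).
# _read_main_config() expects main.yaml to deserialize to roughly the structure
# below; the keys mirror the .get() calls above, the values are invented.
_EXAMPLE_MAIN_YAML = {
    "packages": ["minimal", "libraries"],
    "libraries": ["python-libs", "r-libs"],
    "custom_ignore": [],
    "custom_additional": None,
}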
# ### Library specific installation code
def _python_library_installer(config):
"""Install python specific libraries using pip, conda and easy_install.
Handles using isolated anaconda environments.
"""
if shared._is_anaconda(env):
conda_bin = shared._conda_cmd(env)
for pname in env.flavor.rewrite_config_items("python", config.get("conda", [])):
env.safe_run("{0} install --yes {1}".format(conda_bin, pname))
cmd = env.safe_run
with settings(warn_only=True):
cmd("%s -U distribute" % os.path.join(os.path.dirname(conda_bin), "easy_install"))
else:
pip_bin = shared._pip_cmd(env)
ei_bin = pip_bin.replace("pip", "easy_install")
env.safe_sudo("%s -U pip" % ei_bin)
with settings(warn_only=True):
env.safe_sudo("%s -U distribute" % ei_bin)
cmd = env.safe_sudo
for pname in env.flavor.rewrite_config_items("python", config['pypi']):
cmd("{0} install --upgrade {1} --allow-unverified {1} --allow-external {1}".format(shared._pip_cmd(env), pname)) # fixes problem with packages not being in pypi
def _ruby_library_installer(config):
"""Install ruby specific gems.
"""
gem_ext = getattr(env, "ruby_version_ext", "")
def _cur_gems():
with settings(
hide('warnings', 'running', 'stdout', 'stderr')):
gem_info = env.safe_run_output("gem%s list --no-versions" % gem_ext)
return [l.rstrip("\r") for l in gem_info.split("\n") if l.rstrip("\r")]
installed = _cur_gems()
for gem in env.flavor.rewrite_config_items("ruby", config['gems']):
# update current gems only to check for new installs
if gem not in installed:
installed = _cur_gems()
if gem in installed:
env.safe_sudo("gem%s update %s" % (gem_ext, gem))
else:
env.safe_sudo("gem%s install %s" % (gem_ext, gem))
def _perl_library_installer(config):
"""Install perl libraries from CPAN with cpanminus.
"""
with shared._make_tmp_dir() as tmp_dir:
with cd(tmp_dir):
env.safe_run("wget --no-check-certificate -O cpanm "
"https://raw.github.com/miyagawa/cpanminus/master/cpanm")
env.safe_run("chmod a+rwx cpanm")
env.safe_sudo("mv cpanm %s/bin" % env.system_install)
sudo_str = "--sudo" if env.use_sudo else ""
for lib in env.flavor.rewrite_config_items("perl", config['cpan']):
# Need to hack stdin because of some problem with cpanminus script that
# causes fabric to hang
# http://agiletesting.blogspot.com/2010/03/getting-past-hung-remote-processes-in.html
env.safe_run("cpanm %s --skip-installed --notest %s < /dev/null" % (sudo_str, lib))
def _haskell_library_installer(config):
"""Install haskell libraries using cabal.
"""
run("cabal update")
for lib in config["cabal"]:
sudo_str = "--root-cmd=sudo" if env.use_sudo else ""
env.safe_run("cabal install %s --global %s" % (sudo_str, lib))
lib_installers = {
"r-libs" : libraries.r_library_installer,
"python-libs" : _python_library_installer,
"ruby-libs" : _ruby_library_installer,
"perl-libs" : _perl_library_installer,
"haskell-libs": _haskell_library_installer,
}
def install_libraries(language):
"""High level target to install libraries for a specific language.
"""
_setup_logging(env)
_check_fabric_version()
_configure_fabric_environment(env, ignore_distcheck=True)
_do_library_installs(["%s-libs" % language])
def _do_library_installs(to_install):
for iname in to_install:
yaml_file = get_config_file(env, "%s.yaml" % iname).base
with open(yaml_file) as in_handle:
config = yaml.safe_load(in_handle)
lib_installers[iname](config)
|
chapmanb/cloudbiolinux
|
fabfile.py
|
Python
|
mit
| 19,514
|
from __future__ import unicode_literals
__version__ = '0.8.0.1'
|
jstoxrocky/lifelines
|
lifelines/version.py
|
Python
|
mit
| 65
|
'''
Window Pygame: windowing provider based on Pygame
.. warning::
Pygame has been deprecated and will be removed in the release after Kivy
1.11.0.
'''
__all__ = ('WindowPygame', )
# fail early if possible
import pygame
from kivy.compat import PY2
from kivy.core.window import WindowBase
from kivy.core import CoreCriticalException
from os import environ
from os.path import exists, join
from kivy.config import Config
from kivy import kivy_data_dir
from kivy.base import ExceptionManager
from kivy.logger import Logger
from kivy.base import stopTouchApp, EventLoop
from kivy.utils import platform, deprecated
from kivy.resources import resource_find
try:
android = None
if platform == 'android':
import android
except ImportError:
pass
# late binding
glReadPixels = GL_RGBA = GL_UNSIGNED_BYTE = None
class WindowPygame(WindowBase):
@deprecated(
msg='Pygame has been deprecated and will be removed after 1.11.0')
def __init__(self, *largs, **kwargs):
super(WindowPygame, self).__init__(*largs, **kwargs)
def create_window(self, *largs):
# ensure the mouse is still not up after window creation, otherwise, we
# have some weird bugs
self.dispatch('on_mouse_up', 0, 0, 'all', [])
# force display to show (available only for fullscreen)
displayidx = Config.getint('graphics', 'display')
if 'SDL_VIDEO_FULLSCREEN_HEAD' not in environ and displayidx != -1:
environ['SDL_VIDEO_FULLSCREEN_HEAD'] = '%d' % displayidx
# init some opengl, same as before.
self.flags = pygame.HWSURFACE | pygame.OPENGL | pygame.DOUBLEBUF
# right now, activate resizable window only on linux.
# on window / macosx, the opengl context is lost, and we need to
# reconstruct everything. Check #168 for a state of the work.
if platform in ('linux', 'macosx', 'win') and \
Config.getboolean('graphics', 'resizable'):
self.flags |= pygame.RESIZABLE
try:
pygame.display.init()
except pygame.error as e:
raise CoreCriticalException(e.message)
multisamples = Config.getint('graphics', 'multisamples')
if multisamples > 0:
pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLESAMPLES,
multisamples)
pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 16)
pygame.display.gl_set_attribute(pygame.GL_STENCIL_SIZE, 1)
pygame.display.set_caption(self.title)
if self.position == 'auto':
self._pos = None
elif self.position == 'custom':
self._pos = self.left, self.top
else:
raise ValueError('position token in configuration accept only '
'"auto" or "custom"')
if self._fake_fullscreen:
if not self.borderless:
self.fullscreen = self._fake_fullscreen = False
elif not self.fullscreen or self.fullscreen == 'auto':
self.borderless = self._fake_fullscreen = False
if self.fullscreen == 'fake':
self.borderless = self._fake_fullscreen = True
Logger.warning("The 'fake' fullscreen option has been "
"deprecated, use Window.borderless or the "
"borderless Config option instead.")
if self.fullscreen == 'fake' or self.borderless:
Logger.debug('WinPygame: Set window to borderless mode.')
self.flags |= pygame.NOFRAME
# If no position set in borderless mode, we always need
# to set the position. So use 0, 0.
if self._pos is None:
self._pos = (0, 0)
environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % self._pos
elif self.fullscreen in ('auto', True):
Logger.debug('WinPygame: Set window to fullscreen mode')
self.flags |= pygame.FULLSCREEN
elif self._pos is not None:
environ['SDL_VIDEO_WINDOW_POS'] = '%d,%d' % self._pos
# never stay with a None pos, application using w.center will be fired.
self._pos = (0, 0)
# prepare keyboard
repeat_delay = int(Config.get('kivy', 'keyboard_repeat_delay'))
repeat_rate = float(Config.get('kivy', 'keyboard_repeat_rate'))
pygame.key.set_repeat(repeat_delay, int(1000. / repeat_rate))
# set window icon before calling set_mode
try:
filename_icon = self.icon or Config.get('kivy', 'window_icon')
if filename_icon == '':
logo_size = 32
if platform == 'macosx':
logo_size = 512
elif platform == 'win':
logo_size = 64
filename_icon = 'kivy-icon-{}.png'.format(logo_size)
filename_icon = resource_find(
join(kivy_data_dir, 'logo', filename_icon))
self.set_icon(filename_icon)
except:
Logger.exception('Window: cannot set icon')
# try to use mode with multisamples
try:
self._pygame_set_mode()
except pygame.error as e:
if multisamples:
Logger.warning('WinPygame: Video: failed (multisamples=%d)' %
multisamples)
Logger.warning('WinPygame: trying without antialiasing')
pygame.display.gl_set_attribute(
pygame.GL_MULTISAMPLEBUFFERS, 0)
pygame.display.gl_set_attribute(
pygame.GL_MULTISAMPLESAMPLES, 0)
multisamples = 0
try:
self._pygame_set_mode()
except pygame.error as e:
raise CoreCriticalException(e.message)
else:
raise CoreCriticalException(e.message)
if pygame.RESIZABLE & self.flags:
self._pygame_set_mode()
info = pygame.display.Info()
self._size = (info.current_w, info.current_h)
# self.dispatch('on_resize', *self._size)
        # in order to debug future issues with pygame/display, let's show
        # more debug output.
Logger.debug('Window: Display driver ' + pygame.display.get_driver())
Logger.debug('Window: Actual window size: %dx%d',
info.current_w, info.current_h)
if platform != 'android':
            # skipped on unsupported platforms, such as android, which
            # doesn't support gl_get_attribute.
Logger.debug(
'Window: Actual color bits r%d g%d b%d a%d',
pygame.display.gl_get_attribute(pygame.GL_RED_SIZE),
pygame.display.gl_get_attribute(pygame.GL_GREEN_SIZE),
pygame.display.gl_get_attribute(pygame.GL_BLUE_SIZE),
pygame.display.gl_get_attribute(pygame.GL_ALPHA_SIZE))
Logger.debug(
'Window: Actual depth bits: %d',
pygame.display.gl_get_attribute(pygame.GL_DEPTH_SIZE))
Logger.debug(
'Window: Actual stencil bits: %d',
pygame.display.gl_get_attribute(pygame.GL_STENCIL_SIZE))
Logger.debug(
'Window: Actual multisampling samples: %d',
pygame.display.gl_get_attribute(pygame.GL_MULTISAMPLESAMPLES))
super(WindowPygame, self).create_window()
# set mouse visibility
self._set_cursor_state(self.show_cursor)
# if we are on android platform, automatically create hooks
if android:
from kivy.support import install_android
install_android()
def close(self):
pygame.display.quit()
super(WindowPygame, self).close()
def on_title(self, instance, value):
if self.initialized:
pygame.display.set_caption(self.title)
def set_icon(self, filename):
if not exists(filename):
return False
try:
if platform == 'win':
try:
if self._set_icon_win(filename):
return True
except:
# fallback on standard loading then.
pass
            # for all other platforms, or if the ico is not available, use the
# default way to set it.
self._set_icon_standard(filename)
super(WindowPygame, self).set_icon(filename)
except:
Logger.exception('WinPygame: unable to set icon')
def _set_icon_standard(self, filename):
if PY2:
try:
im = pygame.image.load(filename)
except UnicodeEncodeError:
im = pygame.image.load(filename.encode('utf8'))
else:
im = pygame.image.load(filename)
if im is None:
raise Exception('Unable to load window icon (not found)')
pygame.display.set_icon(im)
def _set_icon_win(self, filename):
        # ensure the window icon filename ends with .ico
if not filename.endswith('.ico'):
filename = '{}.ico'.format(filename.rsplit('.', 1)[0])
if not exists(filename):
return False
import win32api
import win32gui
import win32con
hwnd = pygame.display.get_wm_info()['window']
icon_big = win32gui.LoadImage(
None, filename, win32con.IMAGE_ICON,
48, 48, win32con.LR_LOADFROMFILE)
icon_small = win32gui.LoadImage(
None, filename, win32con.IMAGE_ICON,
16, 16, win32con.LR_LOADFROMFILE)
win32api.SendMessage(
hwnd, win32con.WM_SETICON, win32con.ICON_SMALL, icon_small)
win32api.SendMessage(
hwnd, win32con.WM_SETICON, win32con.ICON_BIG, icon_big)
return True
def _set_cursor_state(self, value):
pygame.mouse.set_visible(value)
def screenshot(self, *largs, **kwargs):
global glReadPixels, GL_RGBA, GL_UNSIGNED_BYTE
filename = super(WindowPygame, self).screenshot(*largs, **kwargs)
if filename is None:
return None
if glReadPixels is None:
from kivy.graphics.opengl import (glReadPixels, GL_RGBA,
GL_UNSIGNED_BYTE)
width, height = self.system_size
data = glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE)
data = bytes(bytearray(data))
surface = pygame.image.fromstring(data, (width, height), 'RGBA', True)
pygame.image.save(surface, filename)
Logger.debug('Window: Screenshot saved at <%s>' % filename)
return filename
def flip(self):
pygame.display.flip()
super(WindowPygame, self).flip()
@deprecated
def toggle_fullscreen(self):
if self.flags & pygame.FULLSCREEN:
self.flags &= ~pygame.FULLSCREEN
else:
self.flags |= pygame.FULLSCREEN
self._pygame_set_mode()
def mainloop(self):
for event in pygame.event.get():
# kill application (SIG_TERM)
if event.type == pygame.QUIT:
if self.dispatch('on_request_close'):
continue
EventLoop.quit = True
self.close()
# mouse move
elif event.type == pygame.MOUSEMOTION:
x, y = event.pos
self.mouse_pos = x, self.system_size[1] - y
# don't dispatch motion if no button are pressed
if event.buttons == (0, 0, 0):
continue
self._mouse_x = x
self._mouse_y = y
self._mouse_meta = self.modifiers
self.dispatch('on_mouse_move', x, y, self.modifiers)
# mouse action
elif event.type in (pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP):
self._pygame_update_modifiers()
x, y = event.pos
btn = 'left'
if event.button == 3:
btn = 'right'
elif event.button == 2:
btn = 'middle'
elif event.button == 4:
btn = 'scrolldown'
elif event.button == 5:
btn = 'scrollup'
elif event.button == 6:
btn = 'scrollright'
elif event.button == 7:
btn = 'scrollleft'
eventname = 'on_mouse_down'
if event.type == pygame.MOUSEBUTTONUP:
eventname = 'on_mouse_up'
self._mouse_x = x
self._mouse_y = y
self._mouse_meta = self.modifiers
self._mouse_btn = btn
self._mouse_down = eventname == 'on_mouse_down'
self.dispatch(eventname, x, y, btn, self.modifiers)
# joystick action
elif event.type == pygame.JOYAXISMOTION:
self.dispatch('on_joy_axis', event.joy, event.axis,
event.value)
elif event.type == pygame.JOYHATMOTION:
self.dispatch('on_joy_hat', event.joy, event.hat, event.value)
elif event.type == pygame.JOYBALLMOTION:
self.dispatch('on_joy_ball', event.joy, event.ballid,
event.rel[0], event.rel[1])
elif event.type == pygame.JOYBUTTONDOWN:
self.dispatch('on_joy_button_down', event.joy, event.button)
elif event.type == pygame.JOYBUTTONUP:
self.dispatch('on_joy_button_up', event.joy, event.button)
# keyboard action
elif event.type in (pygame.KEYDOWN, pygame.KEYUP):
self._pygame_update_modifiers(event.mod)
# atm, don't handle keyup
if event.type == pygame.KEYUP:
self.dispatch('on_key_up', event.key,
event.scancode)
continue
# don't dispatch more key if down event is accepted
if self.dispatch('on_key_down', event.key,
event.scancode, event.unicode,
self.modifiers):
continue
self.dispatch('on_keyboard', event.key,
event.scancode, event.unicode,
self.modifiers)
# video resize
elif event.type == pygame.VIDEORESIZE:
self._size = event.size
self.update_viewport()
elif event.type == pygame.VIDEOEXPOSE:
self.canvas.ask_update()
# ignored event
elif event.type == pygame.ACTIVEEVENT:
pass
# drop file (pygame patch needed)
elif event.type == pygame.USEREVENT and \
hasattr(pygame, 'USEREVENT_DROPFILE') and \
event.code == pygame.USEREVENT_DROPFILE:
self.dispatch('on_dropfile', event.filename)
'''
# unhandled event !
else:
Logger.debug('WinPygame: Unhandled event %s' % str(event))
'''
if not pygame.display.get_active():
pygame.time.wait(100)
#
# Pygame wrapper
#
def _pygame_set_mode(self, size=None):
if size is None:
size = self.size
if self.fullscreen == 'auto':
pygame.display.set_mode((0, 0), self.flags)
else:
pygame.display.set_mode(size, self.flags)
def _pygame_update_modifiers(self, mods=None):
# Available mod, from dir(pygame)
# 'KMOD_ALT', 'KMOD_CAPS', 'KMOD_CTRL', 'KMOD_LALT',
# 'KMOD_LCTRL', 'KMOD_LMETA', 'KMOD_LSHIFT', 'KMOD_META',
# 'KMOD_MODE', 'KMOD_NONE'
if mods is None:
mods = pygame.key.get_mods()
self._modifiers = []
if mods & (pygame.KMOD_SHIFT | pygame.KMOD_LSHIFT):
self._modifiers.append('shift')
if mods & (pygame.KMOD_ALT | pygame.KMOD_LALT):
self._modifiers.append('alt')
if mods & (pygame.KMOD_CTRL | pygame.KMOD_LCTRL):
self._modifiers.append('ctrl')
if mods & (pygame.KMOD_META | pygame.KMOD_LMETA):
self._modifiers.append('meta')
def request_keyboard(self, callback, target, input_type='text'):
keyboard = super(WindowPygame, self).request_keyboard(
callback, target, input_type)
if android and not self.allow_vkeyboard:
android.show_keyboard(target, input_type)
return keyboard
def release_keyboard(self, *largs):
super(WindowPygame, self).release_keyboard(*largs)
if android:
android.hide_keyboard()
return True
|
inclement/kivy
|
kivy/core/window/window_pygame.py
|
Python
|
mit
| 17,048
|
from os import getenv
from django.conf import settings
def _setting(key, default):
return getenv(key, default) or getattr(settings, key, default)
# API key from environment by default
API_KEY = _setting("ONFIDO_API_KEY", None)
# Webhook token - see https://documentation.onfido.com/#webhooks
WEBHOOK_TOKEN = _setting("ONFIDO_WEBHOOK_TOKEN", None)
# token must be a bytestring for HMAC function to work
WEBHOOK_TOKEN = str.encode(WEBHOOK_TOKEN) if WEBHOOK_TOKEN else None
# Set to False to turn off event logging
LOG_EVENTS = _setting("ONFIDO_LOG_EVENTS", True)
# Set to True to bypass request verification (NOT RECOMMENDED)
TEST_MODE = _setting("ONFIDO_TEST_MODE", False)
def DEFAULT_REPORT_SCRUBBER(raw):
"""Remove breakdown and properties."""
return {k: v for k, v in raw.items() if k not in ("breakdown", "properties")}
def DEFAULT_APPLICANT_SCRUBBER(raw):
"""Remove all personal data."""
return {k: v for k, v in raw.items() if k in ("id", "href", "created_at")}
# functions used to scrub sensitive data from reports
scrub_report_data = (
getattr(settings, "ONFIDO_REPORT_SCRUBBER", None) or DEFAULT_REPORT_SCRUBBER
)
scrub_applicant_data = (
getattr(settings, "ONFIDO_APPLICANT_SCRUBBER", None) or DEFAULT_APPLICANT_SCRUBBER
)
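# --- Hedged usage sketch (added for illustration; not part of the original file).
# The scrubbers are plain functions over the raw Onfido API dicts; the sample
# payloads below are invented.
def _demo_scrubbers():
    raw_report = {"id": "r1", "result": "clear", "breakdown": {}, "properties": {}}
    raw_applicant = {"id": "a1", "href": "/applicants/a1", "created_at": "2020-01-01", "first_name": "Jo"}
    return scrub_report_data(raw_report), scrub_applicant_data(raw_applicant)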
|
yunojuno/django-onfido
|
onfido/settings.py
|
Python
|
mit
| 1,275
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import base64
from typing import TYPE_CHECKING
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
from cryptography.hazmat.backends import default_backend
import six
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from typing import Optional
class AadClientCertificate(object):
"""Wraps 'cryptography' to provide the crypto operations AadClient requires for certificate authentication.
    :param bytes pem_bytes: bytes of a PEM-encoded certificate including the (RSA) private key
:param bytes password: (optional) the certificate's password
"""
def __init__(self, pem_bytes, password=None):
# type: (bytes, Optional[bytes]) -> None
private_key = serialization.load_pem_private_key(pem_bytes, password=password, backend=default_backend())
if not isinstance(private_key, RSAPrivateKey):
raise ValueError("The certificate must have an RSA private key because RS256 is used for signing")
self._private_key = private_key
cert = x509.load_pem_x509_certificate(pem_bytes, default_backend())
fingerprint = cert.fingerprint(hashes.SHA1()) # nosec
self._thumbprint = six.ensure_str(base64.urlsafe_b64encode(fingerprint), encoding="utf-8")
@property
def thumbprint(self):
# type: () -> str
"""The certificate's SHA1 thumbprint as a base64url-encoded string"""
return self._thumbprint
def sign(self, plaintext):
# type: (bytes) -> bytes
"""Sign bytes using RS256"""
return self._private_key.sign(plaintext, padding.PKCS1v15(), hashes.SHA256())
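# --- Hedged usage sketch (added for illustration; not part of the original file).
# The PEM path below is an assumption; the file must contain both the certificate
# and its RSA private key (unencrypted here, or pass password=...).
def _demo_sign(pem_path="service-principal.pem"):
    with open(pem_path, "rb") as pem_file:
        certificate = AadClientCertificate(pem_file.read())
    return certificate.thumbprint, certificate.sign(b"client assertion payload")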
|
Azure/azure-sdk-for-python
|
sdk/identity/azure-identity/azure/identity/_internal/aadclient_certificate.py
|
Python
|
mit
| 1,944
|
import pytest
from viper import compiler
valid_list = [
"""
x: public(num)
""",
"""
x: public(num(wei / sec))
y: public(num(wei / sec ** 2))
z: public(num(1 / sec))
def foo() -> num(sec ** 2):
return self.x / self.y / self.z
"""
]
@pytest.mark.parametrize('good_code', valid_list)
def test_public_success(good_code):
assert compiler.compile(good_code) is not None
|
NedYork/viper
|
tests/parser/syntax/test_public.py
|
Python
|
mit
| 394
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import str
from builtins import range
from lxml import etree
import sys
import os.path
from . import data_prep_utils
import re
import csv
from argparse import ArgumentParser
from collections import OrderedDict
import io
if sys.version < '3' :
from backports import csv
else :
import csv
def consoleLabel(raw_strings, labels, module):
print('\nStart console labeling!\n')
valid_input_tags = OrderedDict([(str(i), label) for i, label in enumerate(labels)])
printHelp(valid_input_tags)
valid_responses = ['y', 'n', 's', 'f', '']
finished = False
strings_left_to_tag = raw_strings.copy()
total_strings = len(raw_strings)
tagged_strings = set([])
for i, raw_sequence in enumerate(raw_strings, 1):
if not finished:
print('\n(%s of %s)' % (i, total_strings))
print('-'*50)
print('STRING: %s' %raw_sequence)
preds = module.parse(raw_sequence)
user_input = None
while user_input not in valid_responses :
friendly_repr = [(token[0].strip(), token[1]) for token in preds]
print_table(friendly_repr)
sys.stderr.write('Is this correct? (y)es / (n)o / (s)kip / (f)inish tagging / (h)elp\n')
user_input = sys.stdin.readline().strip()
if user_input =='y':
tagged_strings.add(tuple(preds))
strings_left_to_tag.remove(raw_sequence)
elif user_input =='n':
corrected_string = manualTagging(preds,
valid_input_tags)
tagged_strings.add(tuple(corrected_string))
strings_left_to_tag.remove(raw_sequence)
elif user_input in ('h', 'help', '?') :
printHelp(valid_input_tags)
            elif user_input in ('', 's'):
print('Skipped\n')
elif user_input == 'f':
finished = True
print('Done! Yay!')
return tagged_strings, strings_left_to_tag
def print_table(table):
col_width = [max(len(x) for x in col) for col in zip(*table)]
for line in table:
print(u"| %s |" % " | ".join(u"{:{}}".format(x, col_width[i])
for i, x in enumerate(line)))
def manualTagging(preds, valid_input_tags):
tagged_sequence = []
for token, predicted_tag in preds:
while True:
print('What is \'%s\' ? If %s hit return' % (token, predicted_tag))
user_choice = sys.stdin.readline().strip()
if user_choice == '' :
tag = predicted_tag
break
elif user_choice in valid_input_tags :
tag = valid_input_tags[user_choice]
break
elif user_choice in ('h', 'help', '?') :
printHelp(valid_input_tags)
elif user_choice == 'oops':
print('No worries! Let\'s start over in labeling this string')
tagged_sequence_redo = manualTagging(preds, valid_input_tags)
return tagged_sequence_redo
else:
print("That is not a valid tag. Type 'help' to see the valid inputs")
tagged_sequence.append((token, tag))
return tagged_sequence
def naiveConsoleLabel(raw_strings, labels, module):
print('\nStart console labeling!\n')
valid_input_tags = OrderedDict([(str(i), label) for i, label in enumerate(labels)])
printHelp(valid_input_tags)
valid_responses = ['t', 's', 'f', '']
finished = False
strings_left_to_tag = raw_strings.copy()
total_strings = len(raw_strings)
tagged_strings = set([])
for i, raw_sequence in enumerate(raw_strings, 1):
if not finished:
print('\n(%s of %s)' % (i, total_strings))
print('-'*50)
print('STRING: %s' %raw_sequence)
tokens = module.tokenize(raw_sequence)
user_input = None
while user_input not in valid_responses :
sys.stderr.write('(t)ag / (s)kip / (f)inish tagging / (h)elp\n')
user_input = sys.stdin.readline().strip()
if user_input =='t' or user_input == '':
tagged_sequence = naiveManualTag(tokens, valid_input_tags)
tagged_strings.add(tuple(tagged_sequence))
strings_left_to_tag.remove(raw_sequence)
elif user_input in ('h', 'help', '?') :
printHelp(valid_input_tags)
elif user_input == 's':
print('Skipped\n')
elif user_input == 'f':
finished = True
print('Done! Yay!')
return tagged_strings, strings_left_to_tag
def naiveManualTag(raw_sequence, valid_input_tags):
sequence_labels = []
for token in raw_sequence:
valid_tag = False
while not valid_tag:
print('What is \'%s\' ?' %token)
user_input_tag = sys.stdin.readline().strip()
if user_input_tag in valid_input_tags:
valid_tag = True
elif user_input_tag in ('h', 'help', '?') :
printHelp(valid_input_tags)
elif user_input_tag == 'oops':
print('No worries! Let\'s start over in labeling this string')
sequence_labels_redo = naiveManualTag(raw_sequence, valid_input_tags)
return sequence_labels_redo
else:
print("That is not a valid tag. Type 'help' to see the valid inputs")
token_label = valid_input_tags[user_input_tag]
sequence_labels.append((token, token_label))
return sequence_labels
def printHelp(valid_input_tags):
print('*'*50)
print('These are the tags available for labeling:')
for valid_input in valid_input_tags:
print('%s : %s' %(valid_input, valid_input_tags[valid_input]))
print("\ntype 'help' at any time to see labels")
print("type 'oops' if you make a labeling error\n")
print('*'*50, '\n')
def label(module, infile, outfile, xml):
training_data = data_prep_utils.TrainingData(xml, module)
reader = csv.reader(infile)
strings = set(row[0] for row in reader)
labels = module.LABELS
if module.TAGGER:
labeled_list, raw_strings_left = consoleLabel(strings, labels, module)
else:
labeled_list, raw_strings_left = naiveConsoleLabel(strings, labels, module)
training_data.extend(labeled_list)
with open(outfile, 'wb'):
training_data.write(outfile)
file_slug = os.path.basename(infile.name)
if not file_slug.startswith('unlabeled_'):
file_slug = 'unlabeled_' + file_slug
    remainder_file = os.path.join(os.path.dirname(infile.name), file_slug)
data_prep_utils.list2file(raw_strings_left, remainder_file)
|
datamade/parserator
|
parserator/manual_labeling.py
|
Python
|
mit
| 7,110
|
#!/usr/bin/env python
#---------------------------------------
# IMPORTS
#---------------------------------------
import test
from pymake2 import *
#---------------------------------------
# FUNCTIONS
#---------------------------------------
@target
@depends_on('my_target_3')
def my_target_1(conf):
pass
@target
@depends_on('my_target_1')
def my_target_2(conf):
pass
@target
@depends_on('my_target_2')
def my_target_3(conf):
pass
#---------------------------------------
# SCRIPT
#---------------------------------------
test.should_fail()
pymake2({}, [ 'my_target_3' ])
test.success()
|
philiparvidsson/pymake2
|
tests/make_depends_circular.py
|
Python
|
mit
| 610
|
from distutils.core import setup
setup(
name = 'Crollo',
version = '1.0.1',
packages = ['crollo',],
license = 'MIT',
long_description = open('README.md').read(),
)
|
davidnjakai/bc-8-todo-console-application
|
setup.py
|
Python
|
mit
| 169
|
#!/usr/bin/python3
"""
Given an array of characters, compress it in-place.
The length after compression must always be smaller than or equal to the original array.
Every element of the array should be a character (not int) of length 1.
After you are done modifying the input array in-place, return the new length of the array.
Follow up:
Could you solve it using only O(1) extra space?
"""
class Solution:
def compress(self, chars):
"""
tedious pointer manipulation
:type chars: List[str]
:rtype: int
"""
ret = 1
s = 0 # start index of current char
for i in range(1, len(chars) + 1):
if i < len(chars) and chars[i] == chars[s]:
continue
l = i - s
if l > 1:
for digit in str(l):
chars[ret] = digit
ret += 1
if i < len(chars):
chars[ret] = chars[i]
ret += 1
s = i
return ret
def compress_error(self, chars):
"""
tedious pointer manipulation
:type chars: List[str]
:rtype: int
"""
s = 0
for idx in range(1, len(chars) + 1):
if idx < len(chars) and chars[idx] == chars[s]:
continue
l = idx - s
if l == 1:
s = min(s + 1, len(chars) - 1)
else:
for digit in str(l):
s += 1
chars[s] = digit
if idx < len(chars):
s += 1
chars[s] = chars[idx]
return s + 1
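# --- Hedged worked example (added for illustration; not part of the original file).
# compress() rewrites the list in place; only the first `new_len` slots matter.
def _demo_compress():
    chars = ["a", "a", "b", "b", "c", "c", "c"]
    new_len = Solution().compress(chars)
    return chars[:new_len]  # -> ["a", "2", "b", "2", "c", "3"]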
if __name__ == "__main__":
assert Solution().compress(["a"]) == 1
assert Solution().compress(["a","a","b","b","c","c","c"]) == 6
assert Solution().compress(["a","b","b","b","b","b","b","b","b","b","b","b","b"]) == 4
|
algorhythms/LeetCode
|
443 String Compression.py
|
Python
|
mit
| 1,906
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <rawcell>
# #!/usr/bin/env python
# <codecell>
from __future__ import division
from __future__ import with_statement
import numpy as np
from pylab import ion
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib import pyplot as plt
from matplotlib import animation
from scipy.optimize import curve_fit
from scipy.weave import inline, converters
import sys
import time
import cPickle as pickle
from JSAnimation import IPython_display, HTMLWriter
from smartFormat import smartFormat
from plotGoodies import plotDefaults
plotDefaults()
# <codecell>
__author__ = "J.L. Lanfranchi"
__email__ = "jll1062@phys.psu.edu"
__copyright__ = "Copyright 2014 J.L. Lanfranchi"
__credits__ = ["J.L. Lanfranchi"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
# <codecell>
#-- Turn live-interactive plotting on (makes updated plots appear animated)
ion()
#-- Adjust the font used on the plots
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
#mpl.rcParams('font', **font)
# <codecell>
class WaxWendroff:
def __init__(self):
self.c_lax_wendroff = """
py::list ret;
double beta2 = beta*beta;
double u_i2;
double u_ip12;
double u_im12;
double this_diff;
double max_ydiff = 0;
int j;
int i = 0;
//u_i2 = u0(i)*u0(i);
//u_ip12 = u0(i+1)*u0(i+1);
for (j=0; j<n_skip; j++) {
for (i=1; i<m-1; i++) {
//u_im12 = u_i2;
//u_i2 = u_ip2;
u_i2 = u0(i)*u0(i);
u_im12 = u0(i-1)*u0(i-1);
u_ip12 = u0(i+1)*u0(i+1);
//-- Lax - Wendroff scheme
u(i) = u0(i)
- 0.25*beta*(u_ip12 - u_im12)
+ 0.125*beta2 * ( (u0(i+1)+u0(i))*(u_ip12-u_i2)
- (u0(i)+u0(i-1))*(u_i2-u_im12) );
this_diff = fabs(u(i)-u(i-1));
if (this_diff > max_ydiff)
max_ydiff = this_diff;
            //-- Update the "present step" array element with what was just
            //   computed as the "next step" value for this array element
u0(i) = u(i);
}
}
//for (i=1; i<m-1; i++)
// u0(i) = u(i);
//-- Enforce boundary conditions
//u(0) = 0;
//u(m-1) = 0;
ret.append(max_ydiff);
return_val = ret;
"""
self.m = 1000
self.c = 1.0
#dx = 1./m
self.dx = 2*np.pi/self.m
self.dt = self.dx/10
self.epsilon = 1.0
self.beta = self.epsilon*self.dt/self.dx
self.u = np.zeros((self.m+1),float)
self.u0 = np.zeros((self.m+1), float)
self.uf = np.zeros((self.m+1),float)
self.T_final = 100
self.maxN = int(self.T_final/self.dt)
print "dt =", self.dt, ", dx =", self.dx, \
", epsilon =", self.epsilon, ", beta =", self.beta
self.x = np.arange(-(self.m/2)*self.dx,(self.m/2)*self.dx,self.dx)
print len(self.x)
#-- beta = 0.01
#-- epsilon = 0.2
#-- dx = 1e-3
#-- dt = 1e-4
#-- beta = epsilon*dt/dx = 0.02
self.prob = 1
if self.prob == 0:
def finalFun(x, t):
return -np.exp( - 10.*(x - 1.5 - self.c*t)**2 ) \
+ np.exp( - 10.*(x + 1.5 + self.c*t)**2 ) # Exact
elif self.prob == 1:
def finalFun(x, t):
a0 = -1.0
fx = 1 #4*np.pi
return a0/2*np.sin(fx*x-self.c*t)+a0/2*np.sin(fx*x+self.c*t)
self.u0 = finalFun(self.x, 0)
self.u = np.zeros_like(self.u0)
self.fig1 = plt.figure(1, figsize=(5,10), dpi=120)
self.fig1.clf()
self.ax1 = self.fig1.add_subplot(211)
self.ax1.plot(self.x, self.u0, '-',
color=(.6,.6,.6), lw=6, label="initial cond")
self.l_ns, = self.ax1.plot(self.x, self.u, 'o-',
markersize=2,
color='b',
markerfacecolor=(0.8,0,0,.25),
markeredgecolor=(0.8,0,0,.25),
lw=0.5,
label="numerical soln")
self.ax1.legend(loc="best")
self.ax1.set_xlim(-np.pi,np.pi)
self.ax1.set_ylim(-1,1)
self.ax1.set_xlabel(r"Spatial dimension, $x$")
self.ax1.set_title(r"Spatial wave depiction")
self.ax2 = self.fig1.add_subplot(212)
self.l_ms, = self.ax2.plot(0,0, '-o',
color='k',
markerfacecolor='g',
markersize=3,
lw=1.0)
self.ax2.set_xlabel(r"Time index, $j$")
#ax2.set_ylabel(r"Maximum spatial slope")
self.ax2.set_xlim(0, self.maxN)
self.ax2.set_ylim(0,500)
self.ax2.set_title(r"Maximum spatial slope at a given time step")
plt.tight_layout()
#-- Note: Time steps are indexed with j and spatial coordinates with i.
# The previous solution is preserved in u0 for use in computing the
# new solution, which is incrementally stored into the u array.
#
# Once the computation is complete for the new solution, the u array
# is copied into u0 for use in the next time step.
#def init(self):
self.l_ns.set_data(self.x, finalFun(self.x,0))
self.l_ms.set_data(0,0)
self.maxslopelist = []
        self.nskiplist = []
self.allj = []
self.n_skip = 1
self.j = 0
#return self.l_ns, self.l_ms
def animate(self, ii):
print "Iteration number, ii:", ii
        # scipy.weave needs plain local names (dotted names like 'self.u'
        # are not valid weave variables), so alias the arrays and scalars
        u, u0 = self.u, self.u0
        beta, m, n_skip = self.beta, self.m, self.n_skip
        out = inline(self.c_lax_wendroff, ['u', 'u0', 'beta', 'm', 'n_skip'],
                     type_converters=converters.blitz)
self.j += self.n_skip
        self.allj.append(self.j)
self.slope = out[0]/self.dx
self.maxslopelist.append(self.slope)
self.n_skip = min( max(int(5e4/self.slope**2), 10), 1000)
self.n_skip = 100
        self.nskiplist.append(self.n_skip)
print out[0]/self.dx
self.l_ns.set_ydata(self.u)
self.l_ms.set_xdata(self.allj)
self.l_ms.set_ydata(self.maxslopelist)
self.ax2.set_ylim(0,np.max(self.maxslopelist))
self.ax2.set_xlim(0,self.j)
self.fig1.canvas.draw()
#plt.draw()
#if j >= maxN or slope > 2000:
# break
#return l_ns, l_ms
#fig2 = plt.figure(2)
#fig2.clf()
#ax = fig2.add_subplot(111)
#ax.plot(nskiplist, 'm-', lw=3)
#ax.set_ylabel("n skip")
#plt.tight_layout()
ww = WaxWendroff()
#-- blit=False: animate() returns no artists; keep a reference to the animation
anim = animation.FuncAnimation(ww.fig1, ww.animate, frames=20, blit=False)
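# <codecell>
#-- Added illustration (not part of the original notebook): a pure-NumPy
#   sketch of the interior-point update that the weave C string
#   c_lax_wendroff performs, for a Burgers'-type equation
#   u_t + epsilon*u*u_x = 0 with beta = epsilon*dt/dx:
#     u_i <- u_i - (beta/4)(u_{i+1}^2 - u_{i-1}^2)
#            + (beta^2/8)[(u_{i+1}+u_i)(u_{i+1}^2 - u_i^2)
#                         - (u_i+u_{i-1})(u_i^2 - u_{i-1}^2)]
#   The function name and vectorized form are illustrative only; the weave
#   version above is the one driven by the animation.
def lax_wendroff_step_numpy(u0, beta):
    """One Lax-Wendroff sweep; boundary points are held fixed."""
    u = u0.copy()
    sq = u0 * u0
    u[1:-1] = (u0[1:-1]
               - 0.25 * beta * (sq[2:] - sq[:-2])
               + 0.125 * beta**2 * ((u0[2:] + u0[1:-1]) * (sq[2:] - sq[1:-1])
                                    - (u0[1:-1] + u0[:-2]) * (sq[1:-1] - sq[:-2])))
    return u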
# <codecell>
plt.show()
# <codecell>
|
jllanfranchi/phys597_computational2
|
landau_ch19_problem19.3.2/p9x3x2_v2.py
|
Python
|
mit
| 7,841
|
import datetime
from django.core import validators
class Crontab:
"""
Simplified Crontab
Support "minute hour weekday" components of a standard cron job.
- "*/15 2,7,15 1-5" means "every fifteen minutes, on hours 2 7 15, Monday-Friday"
- Minutes are from 0-59, hours from 0-23, and days from 0(Sunday)-6(Saturday)
- Fields can contain multiple comma-separated values
- Values can be an integer or repeating pattern of the '*/2' variety
"""
def __init__(self, schedule: str):
self.schedule = schedule
components = schedule.split(' ')
if len(components) != 3:
raise ValueError('Crontab must be three space-delimited components')
minutes, hours, weekdays = components
self.minutes = parse(minutes, 60)
self.hours = parse(hours, 24)
        self.weekdays = parse(weekdays, 7)
def __repr__(self):
return '<Crontab: {}>'.format(self.schedule)
def next_run(self, current_time: datetime.datetime) -> datetime.datetime:
"""Given the current time, when is the next scheduled run?"""
# if next run is next day, get smallest hour, smallest minute
# if next run is today, future hour, get smallest minute
# if next run is today, this hour, get next greatest minute
next_run = datetime.datetime(current_time.year, current_time.month, current_time.day,
tzinfo=current_time.tzinfo)
weekday = current_time.isoweekday()
weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
if weekday in self.weekdays:
# could be a run today
if current_time.hour in self.hours:
# could be a run this hour
for minute in self.minutes:
if minute > current_time.minute:
# there is a run this hour
return next_run.replace(hour=current_time.hour, minute=minute)
# no run this hour, check future hours
for hour in self.hours:
if hour > current_time.hour:
# there is a run today
return next_run.replace(hour=hour, minute=self.minutes[0])
# no run today, look for next matching weekday
        for day in range(1, 8):  # check up to a full week ahead
next_run += datetime.timedelta(days=1)
weekday = next_run.isoweekday()
weekday = 0 if weekday == 7 else weekday # Move Sunday to day 0
if weekday in self.weekdays:
return next_run.replace(hour=self.hours[0], minute=self.minutes[0])
raise RuntimeError('No next run found for schedule {}'.format(self.schedule))
def parse(pattern: str, max_value: int):
"""Convert a string crontab component into a set of integers less than a given max"""
values = set()
for part in pattern.split(','):
fraction = part.split('/')
if len(fraction) == 1:
numerator, denominator = part, None
elif len(fraction) == 2:
numerator, denominator = fraction[0], int(fraction[1])
else:
raise ValueError('Expression {} should contain zero or one slash (/)'.format(part))
if numerator == '*':
lower, upper = 0, max_value
elif '-' in numerator:
lower, upper = numerator.split('-')
lower, upper = int(lower), int(upper) + 1
else:
lower, upper = int(numerator), int(numerator) + 1
if lower < 0 or upper > max_value:
raise ValueError('Expression {} is outside the range {}-{}'.format(
part, 0, max_value))
for x in range(lower, upper):
if denominator is None or x % denominator == 0:
values.add(x)
if not values:
raise ValueError('Expression {} gives no runs'.format(pattern))
return sorted(values)
def cron_validator(crontab: str):
try:
Crontab(crontab)
except ValueError as exc:
raise validators.ValidationError(
'Invalid crontab expression: {} ({})'.format(crontab, exc))
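# Illustrative usage (added; not part of the original module): the docstring
# schedule "*/15 2,7,15 1-5" runs every fifteen minutes during hours 2, 7 and
# 15, Monday through Friday.
if __name__ == '__main__':
    cron = Crontab('*/15 2,7,15 1-5')
    # Wednesday 2017-06-14 02:20 -> next run at 02:30 the same day
    print(cron.next_run(datetime.datetime(2017, 6, 14, 2, 20)))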
|
aclowes/yawn
|
yawn/utilities/cron.py
|
Python
|
mit
| 4,118
|
#!/usr/bin/env python
# encoding: utf-8
"""
pascals-triangle-ii.py
Created by Shuailong on 2016-02-20.
https://leetcode.com/problems/pascals-triangle-ii/.
"""
class Solution(object):
def getRow(self, rowIndex):
"""
:type rowIndex: int
:rtype: List[int]
"""
lastrow = []
row = []
for i in range(rowIndex+1):
row = [0]*(i+1)
for j in range(i+1):
if j == 0 or j == i:
row[j] = 1
else:
row[j] = lastrow[j] + lastrow[j-1]
lastrow = row
return row
def main():
solution = Solution()
print solution.getRow(5)
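    # Illustrative check (added): row 5 of Pascal's triangle
    assert solution.getRow(5) == [1, 5, 10, 10, 5, 1]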
if __name__ == '__main__':
main()
|
Shuailong/Leetcode
|
solutions/pascals-triangle-ii.py
|
Python
|
mit
| 754
|
import Levenshtein
import json
from string_util import cleanString
'''
This script merges camp ids into the data from
./data/playaevents-camps-2013.json
OR ./results/camp_data_and_locations.json
using playaevents-events-2013
(The Playa Events API Events feed)
'''
# Threshold under which to discard partial string matches
MATCH_THRESHOLD = .7
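# Illustrative example (added; inputs are hypothetical): Levenshtein.ratio
# returns a similarity score in [0, 1], and only the best-scoring event camp
# above MATCH_THRESHOLD is accepted as a match, e.g.
#   Levenshtein.ratio(cleanString('Camp Awesome!'), cleanString('camp awesome'))
# scores near 1.0 assuming cleanString() lowercases and strips punctuation,
# while unrelated names fall well below the 0.7 cutoff.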
camp_file = open('./results/camp_data_and_locations.json')
events_file = open('./data/playaevents-events-2013.json')
camp_json = json.loads(camp_file.read())
events_json = json.loads(events_file.read())
# Some entries in event_data are null, remove them before writing final json
null_camp_indexes = []
# camps without a match, for manual inspection
unmatched_camps = []
matched_camps = []
# match name fields between entries in two files
for index, camp in enumerate(camp_json):
max_match = 0
max_match_event = ''
    if camp is not None and 'name' in camp:
for event in events_json:
if 'hosted_by_camp' in event:
match = Levenshtein.ratio(cleanString(camp['name']), cleanString(event['hosted_by_camp']['name']))
if match > max_match:
max_match = match
max_match_event = event
        #print "Best match for " + camp['name'] + " : " + max_match_event['hosted_by_camp']['name'] + " (confidence: " + str(max_match) + ")"
if max_match > MATCH_THRESHOLD:
# Match found
camp['id'] = max_match_event['hosted_by_camp']['id']
matched_camps.append(camp)
else:
unmatched_camps.append(camp)
    elif camp is None or 'name' not in camp:
null_camp_indexes.append(index)
# To remove null entries from list, we must move in reverse
# to preserve list order as we remove
null_camp_indexes.reverse()
for index in null_camp_indexes:
camp_json.pop(index)
unmatched_camps_file = open('./results/unmatched_camps_id.json', 'w')
unmatched_camps_file.write(json.dumps(unmatched_camps, sort_keys=True, indent=4))
result_file = open('./results/camp_data_and_locations_ids.json', 'w')
result_file.write(json.dumps(camp_json, sort_keys=True, indent=4))
if len(unmatched_camps) > 0:
print "Matches not found for " + str(len(unmatched_camps)) + " camps"
print "Matched: "+str(len(matched_camps))
|
Burning-Man-Earth/iBurn-Data
|
scripts/2013/playa_data/merge_camp_id_from_events.py
|
Python
|
mit
| 2,285
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from datetime import datetime, date
from decimal import Decimal
from question_app.question_model import Question
from routes.questions.edit import index, save
from mommygae import mommy
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
def test_success(self):
question = mommy.save_one(Question)
template_response = index(question.key.id())
self.assert_can_render(template_response)
class EditTests(GAETestCase):
def test_success(self):
question = mommy.save_one(Question)
old_properties = question.to_dict()
redirect_response = save(question.key.id(), name='name_string')
self.assertIsInstance(redirect_response, RedirectResponse)
edited_question = question.key.get()
        self.assertEqual('name_string', edited_question.name)
self.assertNotEqual(old_properties, edited_question.to_dict())
def test_error(self):
question = mommy.save_one(Question)
old_properties = question.to_dict()
template_response = save(question.key.id())
errors = template_response.context['errors']
self.assertSetEqual(set(['name']), set(errors.keys()))
self.assertEqual(old_properties, question.key.get().to_dict())
self.assert_can_render(template_response)
|
raphaelrpl/portal
|
backend/test/question_tests/question_edit_tests.py
|
Python
|
mit
| 1,434
|
from django.conf.urls import patterns, url
urlpatterns = patterns('parcels.views',
url(r'^$', 'index', name='index'),
url(r'^list/$', 'list_parcels', name='list_parcels'),
url(r'^add_shipment/$', 'add_parcel'),
url(r'^shipment/(?P<shipment_id>\d+)/$', 'shipment_info', name="single_shipment"),
url(r'^shipment/(?P<shipment_id>\d+)/edit/$', 'shipment_edit', name="edit_shipment"),
url(r'^search/$', 'search_parcels', name='search_parcels'),
)
|
festlv/latvijas-pasta-toolis
|
parcels/urls.py
|
Python
|
mit
| 467
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django
from django.forms import Select
from django.forms.models import modelform_factory
from django.test import TestCase
from django.utils import translation
from django.utils.encoding import force_text
try:
from unittest import skipIf
except ImportError:
from django.utils.unittest import skipIf
from django_countries import fields, countries
from django_countries.tests import forms
from django_countries.tests.models import Person, AllowNull, en_zed
skipUnlessLegacy = skipIf(
django.VERSION >= (1, 5),
"Legacy tests only necessary in Django < 1.5")
class TestCountryField(TestCase):
def test_logic(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(person.country, 'NZ')
self.assertNotEqual(person.country, 'ZZ')
self.assertTrue(person.country)
person.country = ''
self.assertFalse(person.country)
def test_only_from_instance(self):
self.assertRaises(AttributeError, lambda: Person.country)
@skipIf(
django.VERSION < (1, 7), "Field.deconstruct introduced in Django 1.7")
def test_deconstruct(self):
field = Person._meta.get_field('country')
self.assertEqual(
field.deconstruct(),
('country', 'django_countries.fields.CountryField', [],
{'max_length': 2}))
def test_text(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(force_text(person.country), 'NZ')
def test_name(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(person.country.name, 'New Zealand')
def test_flag(self):
person = Person(name='Chris Beaven', country='NZ')
with self.settings(STATIC_URL='/static-assets/'):
self.assertEqual(
person.country.flag, '/static-assets/flags/nz.gif')
def test_custom_field_flag_url(self):
person = Person(name='Chris Beaven', country='NZ', other_country='US')
self.assertEqual(
person.other_country.flag, '//flags.example.com/us.gif')
def test_COUNTRIES_FLAG_URL_setting(self):
# Custom relative url
person = Person(name='Chris Beaven', country='NZ')
with self.settings(COUNTRIES_FLAG_URL='img/flag-{code_upper}.png',
STATIC_URL='/static-assets/'):
self.assertEqual(
person.country.flag, '/static-assets/img/flag-NZ.png')
# Custom absolute url
with self.settings(COUNTRIES_FLAG_URL='https://flags.example.com/'
'{code_upper}.PNG'):
self.assertEqual(
person.country.flag, 'https://flags.example.com/NZ.PNG')
def test_blank(self):
person = Person.objects.create(name='The Outsider')
self.assertEqual(person.country, '')
person = Person.objects.get(pk=person.pk)
self.assertEqual(person.country, '')
def test_null(self):
person = AllowNull.objects.create(country=None)
self.assertIsNone(person.country.code)
person = AllowNull.objects.get(pk=person.pk)
self.assertIsNone(person.country.code)
def test_len(self):
person = Person(name='Chris Beaven', country='NZ')
self.assertEqual(len(person.country), 2)
person = Person(name='The Outsider', country=None)
self.assertEqual(len(person.country), 0)
def test_lookup_text(self):
Person.objects.create(name='Chris Beaven', country='NZ')
Person.objects.create(name='Pavlova', country='NZ')
Person.objects.create(name='Killer everything', country='AU')
lookup = Person.objects.filter(country='NZ')
names = lookup.order_by('name').values_list('name', flat=True)
self.assertEqual(list(names), ['Chris Beaven', 'Pavlova'])
def test_lookup_country(self):
Person.objects.create(name='Chris Beaven', country='NZ')
Person.objects.create(name='Pavlova', country='NZ')
Person.objects.create(name='Killer everything', country='AU')
oz = fields.Country(code='AU', flag_url='')
lookup = Person.objects.filter(country=oz)
names = lookup.values_list('name', flat=True)
self.assertEqual(list(names), ['Killer everything'])
def test_save_empty_country(self):
Person.objects.create(name='The Outsider')
person = Person.objects.get()
self.assertEqual(person.country, '')
def test_create_modelform(self):
Form = modelform_factory(Person, fields=['country'])
form_field = Form().fields['country']
self.assertTrue(isinstance(form_field.widget, Select))
def test_render_form(self):
Form = modelform_factory(Person, fields=['country'])
Form().as_p()
class TestCountryObject(TestCase):
def test_hash(self):
country = fields.Country(code='XX', flag_url='')
self.assertEqual(hash(country), hash('XX'))
def test_repr(self):
country1 = fields.Country(code='XX')
country2 = fields.Country(code='XX', flag_url='')
self.assertEqual(
repr(country1),
'Country(code={0})'.format(repr('XX')))
self.assertEqual(
repr(country2),
'Country(code={0}, flag_url={1})'.format(repr('XX'), repr('')))
def test_flag_on_empty_code(self):
country = fields.Country(code='', flag_url='')
self.assertEqual(country.flag, '')
def test_ioc_code(self):
country = fields.Country(code='NL', flag_url='')
self.assertEqual(country.ioc_code, 'NED')
def test_country_from_ioc_code(self):
country = fields.Country.country_from_ioc('NED')
self.assertEqual(country, fields.Country('NL', flag_url=''))
def test_country_from_blank_ioc_code(self):
country = fields.Country.country_from_ioc('')
self.assertIsNone(country)
def test_country_from_nonexistence_ioc_code(self):
country = fields.Country.country_from_ioc('XXX')
self.assertIsNone(country)
def test_alpha3(self):
country = fields.Country(code='BN')
self.assertEqual(country.alpha3, 'BRN')
def test_alpha3_invalid(self):
country = fields.Country(code='XX')
self.assertEqual(country.alpha3, '')
def test_numeric(self):
country = fields.Country(code='BN')
self.assertEqual(country.numeric, 96)
def test_numeric_padded(self):
country = fields.Country(code='AL')
self.assertEqual(country.numeric_padded, '008')
country = fields.Country(code='BN')
self.assertEqual(country.numeric_padded, '096')
country = fields.Country(code='NZ')
self.assertEqual(country.numeric_padded, '554')
def test_numeric_invalid(self):
country = fields.Country(code='XX')
self.assertEqual(country.numeric, None)
def test_numeric_padded_invalid(self):
country = fields.Country(code='XX')
self.assertEqual(country.numeric_padded, None)
class TestModelForm(TestCase):
def test_translated_choices(self):
lang = translation.get_language()
translation.activate('eo')
form = forms.PersonForm()
try:
# This is just to prove that the language changed.
self.assertEqual(list(countries)[0][1], 'Afganio')
# If the choices aren't lazy, this wouldn't be translated. It's the
# second choice because the first one is the initial blank option.
self.assertEqual(
form.fields['country'].choices[1][1], 'Afganio')
self.assertEqual(
form.fields['country'].widget.choices[1][1], 'Afganio')
finally:
translation.activate(lang)
@skipUnlessLegacy
def test_legacy_default(self):
self.assertEqual(
forms.LegacyForm.base_fields['default'].initial, 'AU')
@skipUnlessLegacy
def test_legacy_default_callable(self):
self.assertEqual(
forms.LegacyForm.base_fields['default_callable'].initial, en_zed)
form = forms.LegacyForm()
self.assertEqual(form['default_callable'].value(), 'NZ')
@skipUnlessLegacy
def test_legacy_empty_value(self):
self.assertEqual(
forms.LegacyForm.base_fields['default'].empty_value, None)
self.assertEqual(
forms.LegacyForm.base_fields['default_callable'].empty_value, '')
|
velfimov/django-countries
|
django_countries/tests/test_fields.py
|
Python
|
mit
| 8,503
|
from utils import CanadianScraper, CanadianPerson as Person
import json
import re
import requests
COUNCIL_PAGE = 'http://winnipeg.ca/council/'
class WinnipegPersonScraper(CanadianScraper):
def scrape(self):
# https://winnipeg.ca/council/wards/includes/wards.js
# var COUNCIL_API = 'https://data.winnipeg.ca/resource/r4tk-7dip.json';
api_url = 'https://data.winnipeg.ca/resource/r4tk-7dip.json'
data = json.loads(requests.get(api_url).content)
page = self.lxmlize(COUNCIL_PAGE, 'utf-8')
councillors = page.xpath('//div[@class="box"]')
assert len(councillors), 'No councillors found'
for councillor in councillors:
role = councillor.xpath('.//div[@class="insideboxtitle"]/text()')[0].strip()
name = councillor.xpath('.//p[@class="insideboxtext"]/text()')[0]
image = councillor.xpath('.//@src')[0]
if 'Councillor' in name:
role = 'Councillor'
name = name.replace('Councillor ', '')
url = api_url
item = next((item for item in data if item['person'] == name and item['current_council']), None)
if item is None:
raise Exception(name)
district = item['name_english'].replace(' - ', '—') # hyphen, m-dash
email = item['email_link']
voice = item['phone']
fax = item['fax']
p = Person(primary_org='legislature', name=name, district=district, role=role)
p.add_source(COUNCIL_PAGE)
p.add_source(url)
if not image.endswith('nophoto.jpg'):
p.image = image
p.add_contact('email', parse_email(email))
p.add_contact('voice', voice, 'legislature')
p.add_contact('fax', fax, 'legislature')
yield p
def parse_email(email):
return re.search('=([^&]+)', email).group(1) + '@winnipeg.ca'
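# Illustrative note (added; the input is hypothetical): parse_email keeps the
# value of the first query-string parameter and turns it into a city address:
#   parse_email('contact.asp?to=JDoe&ward=1')  ->  'JDoe@winnipeg.ca'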
|
opencivicdata/scrapers-ca
|
ca_mb_winnipeg/people.py
|
Python
|
mit
| 1,948
|
import tensorflow as tf
STATE = tf.Variable(0, name='counter')
#print STATE.name
ONE = tf.constant(1)
new_value = tf.add(STATE, ONE)
update = tf.assign(STATE, new_value)
init = tf.initialize_all_variables()  # variables must be initialized before use
with tf.Session() as SESS:
SESS.run(init)
for _ in range(3):
SESS.run(update)
print SESS.run(STATE)
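# Added note: each loop runs the assign op once, so this prints 1, 2, 3.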
|
zhaotai/tensorflow-practice
|
variable.py
|
Python
|
mit
| 366
|