| repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) | prefix (stringlengths 0-8.16k) | middle (stringlengths 3-512) | suffix (stringlengths 0-8.17k) |
|---|---|---|---|---|---|---|---|---|
Mega-DatA-Lab/mxnet | tests/python/unittest/test_metric.py | Python | apache-2.0 | 1,757 | 0.002277 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import json
def check_metric(metric, *args, **kwargs):
metric = mx.metric.create(metric, *args, **kwargs)
str_metric = json.dumps(metric.get_config())
metric2 = mx.metric.create(str_metric)
assert metric.get_config() == metric2.get_config()
def test_metrics():
check_metric('acc', axis=0)
check_metric('f1')
check_metric('perplexity', -1)
check_metric('pearsonr')
check_metric('nll_loss')
composite = mx.metric.create(['acc', 'f1'])
check_metric(composite)
def test_nll_loss():
metric = mx.metric.create('nll_loss')
pred = mx.nd.array([[0.2, 0.3, 0.5], [0.6, 0.1, 0.3]])
label = mx.nd.array([2, 1])
metric.update([label], [pred])
_, loss = metric.get()
expected_loss = 0.0
expected_loss = -(np.log(pred[0][2].asscalar()) + np.log(pred[1][1].asscalar())) / 2
assert loss == expected_loss
if __name__ == '__main__':
import nose
nose.runmodule()
|
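The expected value in test_nll_loss above is just the mean negative log-probability of the true class labels. A standalone NumPy sketch of the same arithmetic (illustrative only; no MXNet required):

import numpy as np

pred = np.array([[0.2, 0.3, 0.5], [0.6, 0.1, 0.3]])
label = [2, 1]
nll = -np.mean([np.log(p[l]) for p, l in zip(pred, label)])
# -(log(0.5) + log(0.1)) / 2 ~= 1.4979, matching expected_loss in the test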
adamcharnock/lightbus | lightbus/client/subclients/event.py | Python | apache-2.0 | 12,494 | 0.003764 |
import asyncio
import inspect
import logging
from typing import List, Tuple, Callable, NamedTuple
from lightbus.schema.schema import Parameter
from lightbus.message import EventMessage
from lightbus.client.subclients.base import BaseSubClient
from lightbus.client.utilities import validate_event_or_rpc_name, queue_exception_checker, OnError
from lightbus.client.validator import validate_outgoing, validate_incoming
from lightbus.exceptions import (
UnknownApi,
EventNotFound,
InvalidEventArguments,
InvalidEventListener,
ListenersAlreadyStarted,
DuplicateListenerName,
)
from lightbus.log import L, Bold
from lightbus.client.commands import (
SendEventCommand,
AcknowledgeEventCommand,
ConsumeEventsCommand,
CloseCommand,
)
from lightbus.utilities.async_tools import run_user_provided_callable, cancel_and_log_exceptions
from lightbus.utilities.internal_queue import InternalQueue
from lightbus.utilities.casting import cast_to_signature
from lightbus.utilities.deforming import deform_to_bus
from lightbus.utilities.singledispatch import singledispatchmethod
logger = logging.getLogger(__name__)
class EventClient(BaseSubClient):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._event_listeners: List[Listener] = []
self._event_listener_tasks = set()
self._listeners_started = False
async def fire_event(
self, api_name, name, kwargs: dict = None, options: dict = None
) -> EventMessage:
kwargs = kwargs or {}
try:
api = self.api_registry.get(api_name)
except UnknownApi:
raise UnknownApi(
"Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name}"
" was found in the registry. An API being in the registry implies you are an"
" authority on that API. Therefore, Lightbus requires the API to be in the registry"
" as it is a bad idea to fire events on behalf of remote APIs. However, this could"
" also be caused by a typo in the API name or event name, or be because the API"
" class has not been registered using bus.client.register_api(). ".format(
**locals()
)
)
validate_event_or_rpc_name(api_name, "event", name)
try:
event = api.get_event(name)
except EventNotFound:
raise EventNotFound(
"Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does"
" not seem to contain an event named {name}. You may need to define the event, you"
" may also be using the incorrect API. Also check for typos.".format(**locals())
)
p: Parameter
parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}
required_parameter_names = {
p.name if isinstance(p, Parameter) else p
for p in event.parameters
if getattr(p, "is_required", True)
}
if required_parameter_names and not required_parameter_names.issubset(set(kwargs.keys())):
raise InvalidEventArguments(
"Missing required arguments when firing event {}.{}. Attempted to fire event with "
"{} arguments: {}. Event requires {}: {}".format(
api_name,
name,
len(kwargs),
sorted(kwargs.keys()),
len(parameter_names),
sorted(parameter_names),
)
)
extra_arguments = set(kwargs.keys()) - parameter_names
if extra_arguments:
raise InvalidEventArguments(
"Unexpected argument supplied when firing event {}.{}. Attempted to fire event with"
" {} arguments: {}. Unexpected argument(s): {}".format(
api_name, name, len(kwargs), sorted(kwargs.keys()), sorted(extra_arguments),
)
)
kwargs = deform_to_bus(kwargs)
event_message = EventMessage(
api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
)
validate_outgoing(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_sent", event_message=event_message)
logger.info(L("📤 Sending event {}.{}".format(Bold(api_name), Bold(name))))
await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()
await self.hook_registry.execute("after_event_sent", event_message=event_message)
return event_message
def listen(
self,
events: List[Tuple[str, str]],
listener: Callable,
listener_name: str,
options: dict = None,
on_error: OnError = OnError.SHUTDOWN,
):
if self._listeners_started:
# We are actually technically able to support starting listeners after worker
# startup, but it seems like it is a bad idea and a bit of an edge case.
# We may revisit this if sufficient demand arises.
raise ListenersAlreadyStarted(
"You are trying to register a new listener after the worker has started running."
" Listeners should be setup in your @bus.client.on_start() hook, in your bus.py"
" file."
)
sanity_check_listener(listener)
for listener_api_name, _ in events:
duplicate_listener = self.get_event_listener(listener_api_name, listener_name)
if duplicate_listener:
raise DuplicateListenerName(
f"A listener with name '{listener_name}' is already registered for API"
f" '{listener_api_name}'. You cannot have multiple listeners with the same name"
" for a given API. Rename one of your listeners to resolve this problem."
)
for api_name, name in events:
validate_event_or_rpc_name(api_name, "event", name)
self._event_listeners.append(
Listener(
callable=listener,
options=options or {},
events=events,
name=listener_name,
on_error=on_error,
)
)
def get_event_listener(self, api_name: str, listener_name: str):
for listener in self._event_listeners:
if listener.name == listener_name:
for listener_api_name, _ in listener.events:
if listener_api_name == api_name:
return listener
return None
async def _on_message(
self, event_message: EventMessage, listener: Callable, options: dict, on_error: OnError
):
# TODO: Check events match those requested
logger.info(
L(
"📩 Received event {}.{} with ID {}".format(
Bold(event_message.api_name), Bold(event_message.event_name), event_message.id
)
)
)
validate_incoming(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_execution", event_message=event_message)
if self.config.api(event_message.api_name).cast_values:
parameters = cast_to_signature(parameters=event_message.kwargs, callable=listener)
else:
parameters = event_message.kwargs
# Call the listener.
# Pass the event message as a positional argument,
# thereby allowing listeners to have flexibility in the argument names.
# (And therefore allowing listeners to use the `event` parameter themselves)
if on_error == OnError.SHUTDOWN:
# Run the callback in the queue_exception_checker(). This will
# put any errors into Lightbus' error queue, and therefore
# cause a shutdown
await queue_exception_checker(
run_user_provided_callable(listener, args=[event_message], kwargs=parameters),
|
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractShibbsdenCom.py | Python | bsd-3-clause | 714 | 0.029412 |
def extractShibbsdenCom(item):
'''
Parser for 'shibbsden.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('GOOD CHILD', 'Reborn as a Good Child', 'translated'),
('LUCKY CAT', 'I am the Lucky Cat of an MMORPG', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
ovresko/erpnext | erpnext/patches/v8_0/update_sales_cost_in_project.py | Python | gpl-3.0 | 303 | 0.026403 |
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("projects", "doctype", "project")
frappe.db.sql("""
update `tabProject` p
set total_sales_amount = ifnull((select sum(base_grand_total)
from `tabSales Order` where project=p.name and docstatus=1), 0)
|
""")
|
sdolemelipone/django-crypsis | crypsis/tables.py | Python | gpl-3.0 | 4,778 | 0.000209 |
import logging
from django.utils.html import format_html
import django_tables2 as tables
from django_tables2.rows import BoundPinnedRow, BoundRow
logger = logging.getLogger(__name__)
# A cheat to force BoundPinnedRows to use the same rendering as BoundRows
# otherwise links don't work
# BoundPinnedRow._get_and_render_with = BoundRow._get_and_render_with
class MultiLinkColumn(tables.RelatedLinkColumn):
"""
Like RelatedLinkColumn but allows multiple choices of accessor to be
rendered in a hierarchy, e.g.
accessors = ['foo.bar', 'baz.bof']
text = '{instance.number}: {instance}'
In this case if 'foo.bar' resolves, it will be rendered. Otherwise
'baz.bof' will be tested to resolve, and so on. If nothing renders,
the column will be blank. The text string will resolve using instance.
"""
def __init__(self, accessors, **kwargs):
"""Here we force order by the accessors. By default MultiLinkColumns
have empty_values: () to force calculation every time.
"""
defaults = {
'order_by': accessors,
'empty_values': (),
}
defaults.update(**kwargs)
super().__init__(**defaults)
self.accessors = [tables.A(a) for a in accessors]
def compose_url(self, record, bound_column):
"""Resolve the first accessor which resolves. """
for a in self.accessors:
try:
return a.resolve(record).get_absolute_url()
except (ValueError, AttributeError):
continue
return ""
def text_value(self, record, value):
"""If self.text is set, it will be used as a format string for the
instance returned by the accessor with the keyword `instance`.
"""
for a in self.accessors:
try:
instance = a.resolve(record)
if instance is None:
raise ValueError
except ValueError:
continue
# Use self.text as a format string
if self.text:
return self.text.format(instance=instance, record=record,
value=value)
else:
return str(instance)
# Finally if no accessors were resolved, return value or a blank string
# return super().text_value(record, value)
return value or ""
class XeroLinkColumn(tables.Column):
"""Renders a badge link to the objects record in xero."""
def render(self, value, record=None):
if record.xero_id:
return format_html(
'<span class="badge progress-bar-info">'
'<a class="alert-link" role="button" target="_blank" '
'href="{href}">View in Xero</a></span>',
href=record.get_xero_url()
)
class BaseTable(tables.Table):
class Meta:
attrs = {"class": "table table-bordered table-striped table-hover "
"table-condensed"}
# @classmethod
# def set_header_color(cls, color):
# """
# Sets all column headers to have this background colour.
# """
# for column in cls.base_columns.values():
# try:
# column.attrs['th'].update(
# {'style': f'background-color:{color};'})
# except KeyError:
# column.attrs['th'] = {'style': f'background-color:{color};'}
def set_header_color(self, color):
"""
Sets all column headers to have this background colour.
"""
for column in self.columns.columns.values():
try:
column.column.attrs['th'].update(
{'style': f'background-color:{color};'})
except KeyError:
column.column.attrs['th'] = {
'style': f'background-color:{color};'}
class ModelTable(BaseTable):
class Meta(BaseTable.Meta):
exclude = ('id',)
class CurrencyColumn(tables.Column):
"""Render a table column as GBP."""
def render(self, value):
return f'£{value:,.2f}'
class NumberColumn(tables.Column):
"""Only render decimal places if necessary."""
def render(self, value):
if value is not None:
return f'{value:n}'
class ColorColumn(tables.Column):
"""Render the colour in a box."""
def __init__(self, *args, **kwargs):
"""This will ignore other attrs passed in."""
kwargs.setdefault('attrs', {'td': {'class': "small-width text-center"}})
super().__init__(*args, **kwargs)
def render(self, value):
if value:
return format_html(
'<div class="color-box" style="background:{};"></div>', value)
|
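The accessor fallback described in MultiLinkColumn's docstring is easiest to see in a concrete table definition. A minimal sketch (InvoiceTable and the customer/supplier accessors are hypothetical names, not from this repo):

class InvoiceTable(ModelTable):
    # Tries customer.contact first, then falls back to supplier.contact;
    # renders blank if neither accessor resolves.
    contact = MultiLinkColumn(['customer.contact', 'supplier.contact'],
                              text='{instance}')
    total = CurrencyColumn()  # rendered as, e.g., £1,234.56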
gam17/QAD | cmd/qad_array_maptool.py | Python | gpl-3.0 | 9,990 | 0.020048 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QAD Quantum Aided Design plugin
class to manage the map tool for the array command
-------------------
begin : 2016-05-31
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from .. import qad_utils
from ..qad_variables import QadVariables
from ..qad_getpoint import QadGetPoint, QadGetPointSelectionModeEnum, QadGetPointDrawModeEnum
from ..qad_highlight import QadHighlight
from ..qad_dim import QadDimStyles, appendDimEntityIfNotExisting
from ..qad_entity import QadCacheEntitySetIterator, QadEntityTypeEnum
from .. import qad_array_fun
#===============================================================================
# Qad_array_maptool_ModeEnum class.
#===============================================================================
class Qad_array_maptool_ModeEnum():
# nothing is requested
NONE = 0
# the base point is requested
ASK_FOR_BASE_PT = 1
# the first point for the distance between columns is requested
ASK_FOR_COLUMN_SPACE_FIRST_PT = 2
# the first point for the cell size is requested
ASK_FOR_1PT_CELL = 3
# the second point for the cell size is requested
ASK_FOR_2PT_CELL = 4
# the first point for the distance between rows is requested
ASK_FOR_ROW_SPACE_FIRST_PT = 5
#===============================================================================
# Qad_array_maptool class
#===============================================================================
class Qad_array_maptool(QadGetPoint):
def __init__(self, plugIn):
QadGetPoint.__init__(self, plugIn)
self.cacheEntitySet = None
self.basePt = None
self.arrayType = None
self.distanceBetweenRows = None
self.distanceBetweenCols = None
self.itemsRotation = None
# rectangular array
self.rectangleAngle = None
self.rectangleCols = None
self.rectangleRows = None
self.firstPt = None
# path array
self.pathTangentDirection = None
self.pathRows = None
self.pathItemsNumber = None
self.pathPolyline = None
# polar array
self.centerPt = None
self.polarItemsNumber = None
self.polarAngleBetween = None
self.polarRows = None
self.__highlight = QadHighlight(self.canvas)
def hidePointMapToolMarkers(self):
QadGetPoint.hidePointMapToolMarkers(self)
self.__highlight.hide()
def showPointMapToolMarkers(self):
QadGetPoint.showPointMapToolMarkers(self)
self.__highlight.show()
def clear(self):
QadGetPoint.clear(self)
self.__highlight.reset()
self.mode = None
#============================================================================
# doRectangleArray
#============================================================================
def doRectangleArray(self):
self.__highlight.reset()
dimElaboratedList = [] # list of dimension entities already processed
entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
for entity in entityIterator:
qadGeom = entity.getQadGeom().copy() # initializes the qad geometry info
# check whether the entity belongs to a dimension style
dimEntity = QadDimStyles.getDimEntity(entity)
if dimEntity is not None:
if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
continue
entity = dimEntity
if qad_array_fun.arrayRectangleEntity(self.plugIn, entity, self.basePt, self.rectangleRows, self.rectangleCols, \
self.distanceBetweenRows, self.distanceBetweenCols, self.rectangleAngle, self.itemsRotation,
False, self.__highlight) == False:
return
#============================================================================
# doPathArray
#============================================================================
def doPathArray(self):
self.__highlight.reset()
dimElaboratedList = [] # list of dimension entities already processed
entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
for entity in entityIterator:
qadGeom = entity.getQadGeom().copy() # initializes the qad geometry info
# check whether the entity belongs to a dimension style
dimEntity = QadDimStyles.getDimEntity(entity)
if dimEntity is not None:
if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
continue
entity = dimEntity
if qad_array_fun.arrayPathEntity(self.plugIn, entity, self.basePt, self.pathRows, self.pathItemsNumber, \
self.distanceBetweenRows, self.distanceBetweenCols, self.pathTangentDirection, self.itemsRotation, \
self.pathPolyline, self.distanceFromStartPt, \
False, self.__highlight) == False:
return
#============================================================================
# doPolarArray
#============================================================================
def doPolarArray(self):
self.__highlight.reset()
dimElaboratedList = [] # list of dimension entities already processed
entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
for entity in entityIterator:
qadGeom = entity.getQadGeom().copy() # initializes the qad geometry info
# check whether the entity belongs to a dimension style
dimEntity = QadDimStyles.getDimEntity(entity)
if dimEntity is not None:
if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
continue
entity = dimEntity
if qad_array_fun.arrayPolarEntity(self.plugIn, entity, self.basePt, self.centerPt, self.polarItemsNumber, \
self.polarAngleBetween, self.polarRows, self.distanceBetweenRows, self.itemsRotation, \
False, self.__highlight) == False:
return
def canvasMoveEvent(self, event):
QadGetPoint.canvasMoveEvent(self, event)
# # once the base point is known, the second point is requested
# if self.mode == Qad_array_maptool_ModeEnum.BASE_PT_KNOWN_ASK_FOR_COPY_PT:
# self.setCopiedGeometries(self.tmpPoint)
def activate(self):
QadGetPoint.activate(self)
self.__highlight.show()
def deactivate(self):
try: # needed because closing QGIS fires this event even though the maptool object no longer exists!
QadGetPoint.deactivate(self)
self.__highlight.hide()
except:
pass
def setMode(self, mode):
self.mode = mode
# nothing is requested
if self.mode == Qad_array_maptool_ModeEnum.NO
|
Jwpe/alexandria-server | alexandria_server/permissions/authentication.py | Python | mit | 2,060 | 0.000485 |
from django.conf import settings
from django.utils import timezone
from rest_framework import authentication
from rest_framework import exceptions
import datetime
import jwt
from .models import User
def generate_jwt(user):
payload = {
'user': user.pk,
'exp': timezone.now() + datetime.timedelta(weeks=2),
'iat': timezone.now()
}
return jwt.encode(payload, settings.SECRET_KEY)
def decode_jwt(token):
return jwt.decode(token, settings.SECRET_KEY)
class JWTAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
token = self._get_jwt_from_header(request)
try:
payload = decode_jwt(token)
except jwt.ExpiredSignature:
detail = 'Signature has expired.'
raise exceptions.AuthenticationFailed(detail=detail)
except jwt.DecodeError:
detail = 'Error decoding token.'
raise exceptions.AuthenticationFailed(detail=detail)
except jwt.InvalidTokenError:
raise exceptions.AuthenticationFailed()
user = self._get_user_by_id(payload)
return (user, token)
def _get_jwt_from_header(self, request):
auth_header = authentication.get_authorization_header(request)
if not auth_header:
detail = 'No Authorization header present.'
raise exceptions.AuthenticationFailed(detail=detail)
try:
prefix, token = auth_header.split()
except ValueError:
detail = 'Invalid Authorization header.'
raise exceptions.AuthenticationFailed(detail=detail)
return token
def _get_user_by_id(self, payload):
user_pk = payload['user']
try:
return User.objects.get(pk=user_pk)
except User.DoesNotExist:
detail = 'Invalid payload.'
raise exceptions.AuthenticationFailed(detail=detail)
|
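The generate_jwt/decode_jwt helpers above are a thin wrapper around PyJWT. A standalone round-trip sketch (assumes the older PyJWT API this sample targets, where decode() takes no algorithms argument; SECRET stands in for settings.SECRET_KEY):

import datetime
import jwt

SECRET = 'stand-in-secret'
payload = {'user': 42,
           'exp': datetime.datetime.utcnow() + datetime.timedelta(weeks=2)}
token = jwt.encode(payload, SECRET)
decoded = jwt.decode(token, SECRET)  # raises ExpiredSignature/DecodeError on bad tokens
assert decoded['user'] == 42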
tema-mbt/tema-adapterlib | adapterlib/ToolProtocolHTTP.py | Python | mit | 8,508 | 0.012459 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import httplib
import urllib
import socket
class ToolProtocolHTTP(object):
"""
HTTP/HTTPS client for the TEMA MBT protocol. Communicates with the TEMA test engine.
"""
# is client connected to the server
isConnected = False
def __init__(self):
self.host = "localhost"
self.port = 80
self.php_file = "temagui_http_proxy.php"
socket.setdefaulttimeout(1800)
def __del__(self):
if self.isConnected:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'CLOSE', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
def __requestreply(self,message ):
""" One http(s) request/reply.
Message: Message to send string.
Returns: Reply string.
"""
http_data = ''
try:
http_connection = None
if self.protocol == "HTTP":
http_connection = httplib.HTTPConnection(self.host, self.port)
elif self.protocol == "HTTPS":
http_connection = httplib.HTTPSConnection(self.host, self.port)
else:
return ''
http_connection.connect()
http_connection.request("POST", self.php_file, message , self.http_headers)
http_response = http_connection.getresponse()
http_data = http_response.read()
http_response.close()
http_connection.close()
except Exception, e:
http_data = ''
return http_data
def init(self, host, path, port, username, protocol):
""" Initialises connection. Sends HELO.
host: Server hostname.
path: path to http proxy in server.
port: port
username: wwwgui username
protocol: http/https
returns: Reply to ACK. On error returns ''
"""
self.http_headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
self.host = host
self.php_file = "/".join(["",path,"temagui_http_proxy.php"])
self.port = port
self.username = username
self.protocol = protocol.upper()
try:
# SEND HELO
http_params = urllib.urlencode({"User" : username, "Message" : 'HELO', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = True
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
return http_data
def getKeyword(self):
""" Gets keyword from testserver.
Sends GET to testserver and waits for reply.
Returns: Reply to GET. On error return ''
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'GET', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return 'ERROR'
if message == 'ERR':
# TODO: don't send ack.
http_data = self.__requestreply(http_params)
http_params = urllib.urlencode({"User" : self.username, "Message" : 'ACK', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = False
return 'ERROR'
if not http_data.startswith("ACK"):
print http_data
return "ERROR"
else:
#http_data = http_data.partition("ACK")[2].strip()
http_data = http_data.split("ACK")[1].strip()
if http_data == '' or http_data == None:
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return http_data
def putResult(self, result):
""" Puts result to testserver.
result: True/False
returns: Reply message to PUT
"""
try:
if result:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'true'})
else:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'false'})
except Exception, e:
self.isConnected = False
return ''
try:
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def log(self, msg):
""" Sends log message to testserver
returns: Reply to message.
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'LOG', "Parameter" : msg })
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def bye(self):
""" Sends message BYE to testserver. """
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'BYE', "Parameter" : 'None'})
http_data = self.__requestreply(http_params)
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
def hasConnection(self):
return self.isConnected
if
|
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/drsuapi/DsReplicaObjMetaData2Ctr.py | Python | gpl-2.0 | 880 | 0.007955 |
# encoding: utf-8
# module samba.dcerpc.drsuapi
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsuapi.so
# by generator 1.135
""" drsuapi DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class DsReplicaObjMetaData2Ctr(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
array = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
enumeration_context = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
lavjain/incubator-hawq | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test33.py | Python | apache-2.0 | 80 | 0.0375 |
'd'
def x():
print j
j = 0
def y():
for x in []:
print x
|
deepmind/sonnet | sonnet/src/scale_gradient.py | Python | apache-2.0 | 1,288 | 0.002329 |
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow op that scales gradient for backwards pass."""
from typing import Tuple
from sonnet.src import types
import tensorflow as tf
@tf.custom_gradient
def scale_gradient(
t: tf.Tensor, scale: types.FloatLike
) -> Tuple[tf.Tensor, types.GradFn]:
"""Scales gradients for the backwards pass.
Args:
t: A Tensor.
scale: The scale factor for the gradient on the backwards pass.
Returns:
A Tensor same as input, with scaled backward gradient.
"""
def grad(dy: tf.Tensor) -> Tuple[tf.Tensor, None]:
"""Scaled gradient."""
return scale * dy, None
return t, grad
|
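scale_gradient leaves the forward value untouched and multiplies only the incoming gradient by scale. A minimal check (assumes TF2 eager execution):

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = scale_gradient(x, 0.5) ** 2  # forward pass unchanged: y == 9.0
dy_dx = tape.gradient(y, x)  # backward gradient scaled: 2 * x * 0.5 == 3.0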
FlySorterLLC/SantaFeControlSoftware | Examples/ExampleYeastWorkspace.py | Python | gpl-2.0 | 585 | 0.018803 |
##
## This copyrighted software is distributed under the GPL v2.0 license.
## See the LICENSE file for more details.
##
## Yeast workspace configuration file
import numpy as np
import WorkspaceModules.YeastApplicatorPlate
import WorkspaceModules.YeastArena
import WorkspaceModules.YeastArena3x3
YeastWorkspace = { 'baseThickness': 2.93, 'yeastApplicatorPlate': WorkspaceModules.YeastApplicatorPlate.YeastApplicatorPlate(422.0, 247),
'yeastArena': WorkspaceModules.YeastArena.YeastArena(285, 139),
'yeastArena3x3': WorkspaceModules.YeastArena3x3.YeastArena3x3(124, 36) }
|
kamalx/edx-platform | common/djangoapps/student/views.py | Python | agpl-3.0 | 88,916 | 0.002553 |
"""
Student Views
"""
import datetime
import logging
import uuid
import time
import json
import warnings
from collections import defaultdict
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.translation import ungettext
from django.utils.http import cookie_date, base36_to_int
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from requests import HTTPError
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile, PendingNameChange,
PendingEmailChange, CourseEnrollment, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import get_certificate_url, get_active_web_certificate # pylint: disable=import-error
from dark_lang.models import DarkLangConfig
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
import shoppingcart
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import commit_on_success_with_read_committed
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
set_logged_in_cookie, check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page
)
from student.models import anonymous_id_for_user
from xmodule.error_module import ErrorDescriptor
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
# The course selection work is done in courseware.courses.
domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain is False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(user, domain=domain)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course. Returns a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
"""
if not course.may_certify():
return {}
return _cert_info(user, course, certificate_status_for_student(user, course.id), course_mode)
def reverification_info(course_enrollment_pairs, user, statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in status_list
Args:
course_enrollment_pairs (list): list of (course, enrollment) tuples
user (User): the user whose information we want
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
|
hglkrijger/WALinuxAgent | tests/ga/test_remoteaccess.py | Python | apache-2.0 | 5,846 | 0.005132 |
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import xml
from tests.tools import *
from azurelinuxagent.common.protocol.wire import *
from azurelinuxagent.common.osutil import get_osutil
class TestRemoteAccess(AgentTestCase):
def test_parse_remote_access(self):
data_str = load_data('wire/remote_access_single_account.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals("1", remote_access.incarnation)
self.assertEquals(1, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
return_value=GoalState(load_data('wire/goal_state.xml')))
def test_update_remote_access_conf_no_remote_access(self, _):
protocol = WireProtocol('12.34.56.78')
goal_state = protocol.client.get_goal_state()
protocol.client.update_remote_access_conf(goal_state)
def test_parse_two_remote_access_accounts(self):
data_str = load_data('wire/remote_access_two_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals("1", remote_access.incarnation)
self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount1", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019
|
-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
self.assertEquals("testAccount2", remote_access.u
|
ser_list.users[1].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")
def test_parse_ten_remote_access_accounts(self):
data_str = load_data('wire/remote_access_10_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(10, len(remote_access.user_list.users), "User count does not match.")
def test_parse_duplicate_remote_access_accounts(self):
data_str = load_data('wire/remote_access_duplicate_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[1].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")
def test_parse_zero_remote_access_accounts(self):
data_str = load_data('wire/remote_access_no_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(0, len(remote_access.user_list.users), "User count does not match.")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
return_value=GoalState(load_data('wire/goal_state_remote_access.xml')))
@patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config',
return_value=load_data('wire/remote_access_single_account.xml'))
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert')
def test_update_remote_access_conf_remote_access(self, _1, _2, _3):
protocol = WireProtocol('12.34.56.78')
goal_state = protocol.client.get_goal_state()
protocol.client.update_remote_access_conf(goal_state)
self.assertNotEquals(None, protocol.client.remote_access)
self.assertEquals(1, len(protocol.client.remote_access.user_list.users))
self.assertEquals('testAccount', protocol.client.remote_access.user_list.users[0].name)
self.assertEquals('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password)
def test_parse_bad_remote_access_data(self):
data = "foobar"
self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data)
|
mmmavis/lightbeam-bedrock-website | bedrock/legal/forms.py | Python | mpl-2.0 | 4,294 | 0.000699 |
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from lib.l10n_utils.dotlang import _, _lazy
from bedrock.mozorg.forms import HoneyPotWidget
FRAUD_REPORT_FILE_SIZE_LIMIT = 5242880 # 5MB
class FraudReportForm(forms.Form):
input_url = forms.URLField(
max_length=40,
required=True,
error_messages={
'required': _lazy(u'Please enter a URL.'),
},
widget=forms.TextInput(
attrs={
'size': 40,
'placeholder': _lazy(u'http://offendingsite.com'),
'class': 'required fill-width',
'required': 'required',
'aria-required': 'true',
}
)
)
input_category = forms.ChoiceField(
choices=(
('Charging for software', _lazy(u'Charging for software')),
('Collecting personal information', _lazy(u'Collecting personal information')),
('Domain name violation', _lazy(u'Domain name violation')),
('Logo misuse/modification', _lazy(u'Logo misuse/modification')),
('Distributing modified Firefox/malware', _lazy(u'Distributing modified Firefox/malware')),
),
required=True,
error_messages={
'required': _lazy('Please select a category.'),
},
widget=forms.Select(
attrs={
'title': _lazy(u'Category'),
'class': 'required',
'required': 'required',
'aria-required': 'true',
}
)
)
input_product = forms.ChoiceField(
choices=(
('Firefox', _lazy(u'Firefox')),
('SeaMonkey', _lazy(u'SeaMonkey')),
('Thunderbird', _lazy(u'Thunderbird')),
('Other Mozilla Product/Project', _lazy(u'Other Mozilla Product/Project (specify)')),
),
required=True,
error_messages={
'required': _lazy('Please select a product.'),
},
widget=forms.Select(
attrs={
'title': _lazy(u'Product'),
'class': 'required',
'required': 'required',
'aria-required': 'true',
}
)
)
input_specific_product = forms.CharField(
max_length=80,
required=False,
widget=forms.TextInput(
attrs={
'size': 20,
'class': 'fill-width'
}
)
)
input_details = forms.CharField(
required=False,
widget=forms.Textarea(
attrs={
'rows': '',
'cols': '',
'class': 'fill-width'
}
)
)
input_attachment = forms.FileField(
required=False,
)
input_attachment_desc = forms.CharField(
max_length=40,
required=False,
widget=forms.Textarea(
attrs={
'rows': '',
'cols': '',
'class': 'fill-width'
}
)
)
input_email = forms.EmailField(
max_length=80,
required=False,
error_messages={
'invalid': _lazy(u'Please enter a valid email address'),
},
widget=forms.TextInput(
attrs={
'size': 20,
'class': 'fill-width'
}
)
)
superpriority = forms.BooleanField(widget=HoneyPotWidget, required=False)
def clean_input_attachment(self):
cleaned_data = super(FraudReportForm, self).clean()
attachment = cleaned_data.get("input_attachment")
if attachment:
if attachment._size > FRAUD_REPORT_FILE_SIZE_LIMIT:
raise forms.ValidationError(
_("Attachment must not exceed 5MB"))
return attachment
def clean_superpriority(self):
cleaned_data = super(FraudReportForm, self).clean()
honeypot = cleaned_data.pop('superpriority', None)
if honeypot:
raise forms.ValidationError(
_('Your submission could not be processed'))
|
nomaro/SickBeard_Backup | sickbeard/webserve.py | Python | gpl-3.0 | 155,575 | 0.006421 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import time
import urllib
import re
import threading
import datetime
import random
import locale
from Cheetah.Template import Template
import cherrypy.lib
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import scene_exceptions
from sickbeard import naming
from sickbeard import subtitles
from sickbeard.providers import newznab
from sickbeard.common import Quality, Overview, statusStrings
from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED
from sickbeard.exceptions import ex
from sickbeard.webapi import Api
from lib.tvdb_api import tvdb_api
from lib.dateutil import tz
import network_timezones
import subliminal
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import browser
class PageTemplate (Template):
def __init__(self, *args, **KWs):
KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file'])
super(PageTemplate, self).__init__(*args, **KWs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
if cherrypy.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
self.projectHomePage = "http://code.google.com/p/sickbeard/"
if sickbeard.NZBS and sickbeard.NZBS_UID and sickbeard.NZBS_HASH:
logger.log(u"NZBs.org has been replaced, please check the config to configure the new provider!", logger.ERROR)
ui.notifications.error("NZBs.org Config Update", "NZBs.org has a new site. Please <a href=\""+sickbeard.WEB_ROOT+"/config/providers\">update your config</a> with the api key from <a href=\"http://nzbs.org/login\">http://nzbs.org</a> and then disable the old NZBs.org provider.")
if "X-Forwarded-Host" in cherrypy.request.headers:
self.sbHost = cherrypy.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in cherrypy.request.headers:
self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port']
self.sbHttpsPort = self.sbHttpPort
if "X-Forwarded-Proto" in cherrypy.request.headers:
self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' ('+str(len(classes.ErrorViewer.errors))+')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{ 'title': 'Home', 'key': 'home' },
{ 'title': 'Coming Episodes', 'key': 'comingEpisodes' },
{ 'title': 'History', 'key': 'history' },
{ 'title': 'Manage', 'key': 'manage' },
{ 'title': 'Config', 'key': 'config' },
{ 'title': logPageTitle, 'key': 'errorlogs' },
]
def redirect(abspath, *args, **KWs):
assert abspath[0] == '/'
raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs)
class TVDBWebUI:
def __init__(self, config, log=None):
self.config = config
self.log = log
def selectSeries(self, allSeries):
searchList = ",".join([x['id'] for x in allSeries])
showDirList = ""
for curShowDir in self.config['_showDir']:
showDirList += "showDir="+curShowDir+"&"
redirect("/home/addShows/addShow?" + showDirList + "seriesList=" + searchList)
def _munge(string):
return unicode(string).encode('utf-8', 'xmlcharrefreplace')
def _genericMessage(subject, message):
t = PageTemplate(file="genericMessage.tmpl")
t.submenu = HomeMenu()
t.subject = subject
t.message = message
return _munge(t)
def _getEpisode(show, season, episode):
if show == None or season == None or episode == None:
return "Invalid parameters"
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return "Show not in show list"
epObj = showObj.getEpisode(int(season), int(episode))
if epObj == None:
return "Episode couldn't be retrieved"
return epObj
ManageMenu = [
{ 'title': 'Backlog Overview', 'path': 'manage/backlogOverview' },
{ 'title': 'Manage Searches', 'path': 'manage/manageSearches' },
{ 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses' },
{ 'title': 'Manage Missed Subtitles', 'path': 'manage/subtitleMissed' },
]
if sickbeard.USE_SUBTITLES:
ManageMenu.append({ 'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed' })
class ManageSearches:
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage_manageSearches.tmpl")
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable
redirect("/manage/manageSearches")
@cherrypy.expose
def forceVersionCheck(self):
# force a check to see if there is a new version
result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) #@UndefinedVariable
if result:
logger.log(u"Forcing version check")
redirect("/manage/manageSearches")
class Manage:
manageSearches = ManageSearches()
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage.tmpl")
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def showEpisodeStatuses(self, tvdb_id, whichStatus):
myDB = db.DBConnection()
status_list = [int(whichStatus)]
if status_list[0] == SNATCHED:
st
|
F483/gravur | gravur/common/labelbox.py | Python | mit | 232 | 0 |
# coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE file)
from kivy.uix.label import Label
from gravur.utils import load_widget
@load_widget
class LabelBox(Label):
pass
|
tanium/pytan | lib/taniumpy/object_types/saved_action_approval.py | Python | mit | 654 | 0.009174 |
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class SavedActionApproval(BaseType):
_soap_tag = 'saved_action_approval'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'id': int,
'name': str,
'approved_flag': int},
complex_properties={'metadata': MetadataList},
list_properties={},
)
self.id = None
self.name = None
self.approved_flag = None
self.metadata = None
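# Note: the trailing import below appears deliberate in this generated code:
# MetadataList is only looked up when __init__ runs, so importing it after the
# class definition works, and presumably avoids a circular import between the
# generated object types.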
from metadata_list import MetadataList
|
johnbachman/indra | indra/databases/identifiers.py | Python | bsd-2-clause | 11,854 | 0 |
import re
import logging
from indra.resources import load_resource_json
logger = logging.getLogger(__name__)
identifiers_url = 'https://identifiers.org'
# These are just special cases of namespaces where the mapping from INDRA to
# identifiers.org is not a question of simple capitalization.
identifiers_mappings = {
'UP': 'uniprot',
'UPPRO': 'uniprot.chain',
'UPISO': 'uniprot.isoform',
'REFSEQ_PROT': 'refseq',
'PF': 'pfam',
'IP': 'interpro',
'ECCODE': 'ec-code',
'NONCODE': 'noncodev4.rna',
'LNCRNADB': 'rnacentral',
'MIRBASEM': 'mirbase.mature',
'EGID': 'ncbigene',
'NCBI': 'ncbigene',
'HGNC_GROUP': 'hgnc.genefamily',
'LINCS': 'lincs.smallmolecule',
'PUBCHEM': 'pubchem.compound',
'CHEMBL': 'chembl.compound',
'CTD': 'ctd.chemical',
'CVCL': 'cellosaurus',
}
# These are namespaces used by INDRA that don't have corresponding
# identifiers.org entries
non_registry = {
'SDIS', 'SCHEM', 'SFAM', 'SCOMP', 'SIGNOR', 'HMS-LINCS', 'NXPFA',
'OMIM', 'LSPCI', 'UPLOC', 'BFO', 'CCLE'
}
# These are namespaces that can appear in db_refs but are actually not
# representing grounding.
non_grounding = {
'TEXT', 'TEXT_NORM'
}
# These are reverse mappings from identifiers.org namespaces to INDRA
# namespaces
identifiers_reverse = {
v: k for k, v in identifiers_mappings.items()
}
# We have to patch this one because it is ambiguous
identifiers_reverse['ncbigene'] = 'EGID'
# These are only the URLs that are strictly prefixes and not more complicated
# patterns. This is because some downstream code uses these as prefixes
# rather than arbitrary patterns.
url_prefixes = {
# Biology namespaces
'NXPFA': 'https://www.nextprot.org/term/FA-',
'SIGNOR': 'https://signor.uniroma2.it/relation_result.php?id=',
'LSPCI': 'https://labsyspharm.github.io/lspci/',
# WM namespaces
'UN': 'https://github.com/
|
clulab/eidos/wiki/JSON-LD#Grounding/',
'WDI': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'FAO': 'https://github.com/
|
clulab/eidos/wiki/JSON-LD#Grounding/',
'HUME': ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies'
'/hume_ontology/'),
'CWMS': 'http://trips.ihmc.us/',
'SOFIA': 'http://cs.cmu.edu/sofia/',
}
def get_ns_from_identifiers(identifiers_ns):
""""Return a namespace compatible with INDRA from an identifiers namespace.
For example, this function can be used to map 'uniprot' to 'UP'.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
Returns
-------
str or None
The namespace compatible with INDRA's internal representation or
None if the given namespace isn't an identifiers.org standard.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
if not reg_entry:
return None
mapping = identifiers_reverse.get(identifiers_ns.lower())
if mapping:
return mapping
else:
return identifiers_ns.upper()
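# Illustrative usage (editorial addition, traced from the code above):
# >>> get_ns_from_identifiers('uniprot')
# 'UP'    # found via identifiers_reverse
# >>> get_ns_from_identifiers('no.such.namespace')
# None    # assuming the name is absent from identifiers_registry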
def get_ns_id_from_identifiers(identifiers_ns, identifiers_id):
"""Return a namespace/ID pair compatible with INDRA from identifiers.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
identifiers_id : str
An identifiers.org standard ID in the given namespace.
Returns
-------
(str, str)
A namespace and ID that are valid in INDRA db_refs.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
db_ns = get_ns_from_identifiers(identifiers_ns)
if db_ns is None:
return None, None
db_id = identifiers_id
if reg_entry['namespace_embedded']:
if not identifiers_id.startswith(identifiers_ns.upper()):
db_id = '%s:%s' % (identifiers_ns.upper(), identifiers_id)
return db_ns, db_id
def get_identifiers_ns(db_name):
"""Map an INDRA namespace to an identifiers.org namespace when possible.
Example: this can be used to map 'UP' to 'uniprot'.
Parameters
----------
db_name : str
An INDRA namespace to map to identifiers.org
Returns
-------
str or None
An identifiers.org namespace or None if not available.
"""
mapped_db_name = identifiers_mappings.get(db_name, db_name.lower())
if mapped_db_name not in identifiers_registry:
return None
return mapped_db_name
def get_url_prefix(db_name):
"""Return the URL prefix for a given namespace."""
identifiers_ns = get_identifiers_ns(db_name)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if not identifiers_entry['namespace_embedded']:
return '%s/%s:' % (identifiers_url, identifiers_ns.lower())
else:
return '%s/' % identifiers_url
else:
if db_name in url_prefixes:
return url_prefixes[db_name]
return None
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
    # This is the case where we have a prefix to which we can simply
    # attach the db_id to get the desired URL.
if db_name == 'CHEMBL':
db_id = ensure_chembl_prefix(db_id)
elif db_name == 'CHEBI':
db_id = ensure_chebi_prefix(db_id)
prefix = get_url_prefix(db_name)
if prefix:
return '%s%s' % (prefix, db_id)
# Otherwise, we have to handle some special cases
bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
if db_name == 'LINCS':
if db_id.startswith('LSM-'): # Lincs Small Molecule ID
url = identifiers_url + '/lincs.smallmolecule:%s' % db_id
elif db_id.startswith('LCL-'): # Lincs Cell Line ID
url = identifiers_url + '/lincs.cell:%s' % db_id
else: # Assume LINCS Protein
url = identifiers_url + '/lincs.protein:%s' % db_id
elif db_name == 'CHEMBL':
if not db_id.startswith('CHEMBL'):
db_id = 'CHEMBL%s' % db_id
url = identifiers_url + '/chembl.compound:%s' % db_id
elif db_name == 'HMS-LINCS':
url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
# Special cases with no identifiers entry
elif db_name == 'SCHEM':
url = bel_scai_url + 'selventa-legacy-chemicals/' + \
'selventa-legacy-chemicals-20150601.belns'
elif db_name == 'SCOMP':
url = bel_scai_url + 'selventa-named-complexes/' + \
'selventa-named-complexes-20150601.belns'
elif db_name == 'SFAM':
url = bel_scai_url + 'selventa-protein-families/' + \
'selventa-protein-families-20150601.belns'
elif db_name == 'TEXT' or db_name == 'TEXT_NORM':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
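# Worked example (editorial addition; the ID is hypothetical): SIGNOR has no
# identifiers.org entry, so the URL comes from url_prefixes above:
# >>> get_identifiers_url('SIGNOR', 'SIGNOR-252737')
# 'https://signor.uniroma2.it/relation_result.php?id=SIGNOR-252737'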
def parse_identifiers_url(url):
"""Retrieve database name and ID given the URL.
Parameters
----------
url : str
An identifiers.org URL to parse.
Returns
-------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc. corresponding to the
given URL.
db_id : str
An identifier in the database.
"""
# Try matching by string pattern
db_ns, db_id = None, None
url_pattern = \
r'(?:https?)://identifiers.org/([A-Za-z0-9.-]+)(/|:)([A-Za-z0-9:_.-]+)'
match = re.match(url_pattern, url)
if match is not None:
g = match.groups()
if len(g) == 3:
pattern_ns, pattern_id = g[0], g[2]
db_ns, db_id = get_ns_id_from_identifiers(pattern_ns, pattern_id)
if db_ns == 'HGNC':
if db_id.startswith('HGNC:'):
db_id = db_id[5:]
# If we got UP and UPPRO, return UPPRO
if db_ns == 'UP' and '#PRO_' in url:
db_ns = 'UPPRO'
|
bronikkk/tirpan
|
tests/test_mir04.py
|
Python
|
gpl-3.0
| 46
| 0
|
if x() and y() an
|
d z():
a()
else:
|
b()
|
vinoth3v/In
|
In/comment/page/load_more.py
|
Python
|
apache-2.0
| 2,050
| 0.054634
|
def action_comment_load_more(context, action, entity_type, entity_id, last_id, parent_id, **args):
try:
entity = IN.entitier.load_single(entity_type, int(entity_id))
if not entity:
return
output = Object()
db = IN.db
connection = db.connection
container_id = IN.commenter.get_container_id(entity)
# TODO: paging
# get total
total = 0
limit = 10
cursor = db.select({
'table' : 'entity.comment',
'columns' : ['count(id)'],
'where' : [
['container_id', container_id],
['id', '<', int(last_id)], # load previous
['parent_id', parent_id],
['status', 1],
],
}).execute()
if cursor.rowcount >= 0:
total = int(cursor.fetchone()[0])
more_id = '_'.join(('more-commens', entity_type, str(entity_id), str(parent_id)))
if total > 0:
cursor = db.select({
'table' : 'entity.comment',
'columns' : ['id'],
'where' : [
['container_id', container_id],
['id', '<', int(last_id)],
['parent_id', parent_id], # add main level comments only
['status', 1],
],
'order' : {'created' : 'DESC'},
'limit' : limit,
}).execute()
ids = []
last_id = 0
if cursor.rowcount >= 0:
for row in cursor:
ids.append(row['id'])
last_id = ids[-1] # last id
comments = IN.entitier.load_multiple('Comment', ids)
for id, comment in comments.items():
comment.weight = id # keep asc order
output.add(comment)
remaining = total - limit
if remaining > 0 and last_id > 0:
output.add('TextDiv', {
'id' : more_id,
'value' : str(remaining) + ' more comments',
'css' : ['ajax i-text-center i-text-danger pointer'],
'attributes' : {
'data-href' : ''.join(('/comment/more/!Content/', str(entity_id), '/', str(last_id), '/', str(parent_id)))
},
'weight' : -1,
})
#if not output:
#output.add(type = 'TextDiv', data = {})
|
output = {more_id : output}
context.response = In.core.response.PartialRes
|
ponse(output = output)
except:
IN.logger.debug()
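# Editorial note: `limit` above caps each "load more" batch at 10 comments;
# the remaining count drives the follow-up link, so repeated requests page
# backwards through ids strictly below the last loaded comment id.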
|
jayme-github/CouchPotatoServer
|
couchpotato/core/plugins/quality/main.py
|
Python
|
gpl-3.0
| 8,674
| 0.007033
|
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.request import jsonified, getParams
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType
from sqlalchemy.sql.expression import or_
import os.path
import re
import time
log = CPLog(__name__)
class QualityPlugin(Plugin):
qualities = [
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (5000, 20000), 'label': '1080P', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p'], 'ext':['avi']},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': [], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000),
|
'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
|
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
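    # Editorial note: each `size` tuple above is the expected (min, max)
    # release size in megabytes for that quality; guess() below computes a
    # candidate file's size in MB, presumably to check it against this range.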
def __init__(self):
addEvent('quality.all', self.all)
addEvent('quality.single', self.single)
addEvent('quality.guess', self.guess)
addEvent('quality.pre_releases', self.preReleases)
addApiView('quality.size.save', self.saveSize)
addApiView('quality.list', self.allView, docs = {
'desc': 'List all available qualities',
'return': {'type': 'object', 'example': """{
'success': True,
'list': array, qualities
}"""}
})
addEvent('app.initialize', self.fill, priority = 10)
def preReleases(self):
return self.pre_releases
def allView(self):
return jsonified({
'success': True,
'list': self.all()
})
def all(self):
db = get_session()
qualities = db.query(Quality).all()
temp = []
for quality in qualities:
q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict())
temp.append(q)
return temp
def single(self, identifier = ''):
db = get_session()
quality_dict = {}
quality = db.query(Quality).filter(or_(Quality.identifier == identifier, Quality.id == identifier)).first()
if quality:
quality_dict = dict(self.getQuality(quality.identifier), **quality.to_dict())
return quality_dict
def getQuality(self, identifier):
for q in self.qualities:
if identifier == q.get('identifier'):
return q
def saveSize(self):
params = getParams()
db = get_session()
quality = db.query(Quality).filter_by(identifier = params.get('identifier')).first()
if quality:
setattr(quality, params.get('value_type'), params.get('value'))
db.commit()
return jsonified({
'success': True
})
def fill(self):
        db = get_session()
order = 0
for q in self.qualities:
# Create quality
qual = db.query(Quality).filter_by(identifier = q.get('identifier')).first()
if not qual:
log.info('Creating quality: %s', q.get('label'))
qual = Quality()
qual.order = order
qual.identifier = q.get('identifier')
qual.label = toUnicode(q.get('label'))
qual.size_min, qual.size_max = q.get('size')
db.add(qual)
# Create single quality profile
prof = db.query(Profile).filter(
Profile.core == True
).filter(
Profile.types.any(quality = qual)
).all()
if not prof:
log.info('Creating profile: %s', q.get('label'))
prof = Profile(
core = True,
label = toUnicode(qual.label),
order = order
)
db.add(prof)
profile_type = ProfileType(
quality = qual,
profile = prof,
finish = True,
order = 0
)
prof.types.append(profile_type)
order += 1
db.commit()
time.sleep(0.3) # Wait a moment
return True
def guess(self, files, extra = {}):
# Create hash for cache
hash = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cached = self.getCache(hash)
        if cached and extra == {}: return cached
for cur_file in files:
size = (os.path.getsize(cur_file) / 1024 / 1024) if os.path.isfile(cur_file) else 0
words = re.split('\W+', cur_file.lower())
for quality in self.all():
# Check tags
if quality['identifier'] in words:
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('alternative', [])) & set(words)):
log.debug('Found %s via alt %s in %s', (quality['identifier'], quality.get('alternative'), cur_file))
return self.setCache(hash, quality)
for tag in quality.get('tags', []):
if isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('tags', [])) & set(words)):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
# Try again with loose testing
quality = self.guessLoose(hash, extra = extra)
if quality:
return self.setCache(hash, quality)
log.debug('Could not identify quality for: %s', files)
return None
def guessLoose(self, hash, extra):
for quality in self.all():
# Check width resolution, range 20
if (quality.get('width', 720) - 20) <= extra.get('resolution_width', 0) <= (quality.get('width', 720) + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width', 720), extra.get('resolution_width', 0)))
return self.setCache(hash, quality)
# Check height resolution, range 20
if (quality.
|
wbap/Hackathon2015
|
Nishida/WBAI_open_code/lstm/lstm.py
|
Python
|
apache-2.0
| 1,640
| 0.028571
|
#coding:utf-8
import numpy as np
from chainer import Variable, FunctionSet
import chainer.functions as F
class LSTM(FunctionSet):
def __init__(self,f_n_units, n_units):
super(LSTM, self).__init__(
l1_x = F.Linear(f_n_units, 4*n_units),
l1_h = F.Linear
|
(n_units, 4*n_units),
l6 = F.Linear(n_units, f_n_units)
)
        # Initialize the
|
parameter values in the range -0.08 to 0.08
for param in self.parameters:
param[:] = np.random.uniform(-0.08, 0.08, param.shape)
def forward_one_step(self, x_data, y_data, state, train=True,dropout_ratio=0.0):
x ,t = Variable(x_data,volatile=not train),Variable(y_data,volatile=not train)
h1_in = self.l1_x(F.dropout(x, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
c1, h1 = F.lstm(state['c1'], h1_in)
y = self.l6(F.dropout(h1, ratio=dropout_ratio, train=train))
state = {'c1': c1, 'h1': h1}
return state, F.mean_squared_error(y, t)
def predict(self, x_data, y_data, state):
x ,t = Variable(x_data,volatile=False),Variable(y_data,volatile=False)
h1_in = self.l1_x(x) + self.l1_h(state['h1'])
c1, h1 = F.lstm(state['c1'], h1_in)
y = self.l6(h1)
state = {'c1': c1, 'h1': h1}
return state,F.mean_squared_error(y,t)
def make_initial_state(n_units,train = True):
return {name: Variable(np.zeros((1,n_units), dtype=np.float32),
volatile=not train)
for name in ('c1', 'h1')}
#for name in ('c1', 'h1', 'c2', 'h2', 'c3', 'h3','c4','h4','c5','h5')}
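# Usage sketch (editorial addition; the concrete sizes are assumptions, the
# shapes follow the definitions above):
# model = LSTM(f_n_units=4, n_units=8)
# state = make_initial_state(8, train=True)
# x = np.zeros((1, 4), dtype=np.float32)
# t = np.zeros((1, 4), dtype=np.float32)
# state, loss = model.forward_one_step(x, t, state, train=True,
#                                      dropout_ratio=0.5)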
|
DG-i/openshift-ansible
|
roles/lib_openshift/library/oc_adm_registry.py
|
Python
|
apache-2.0
| 94,103
| 0.001551
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
    - list - returns the current representation of a registry
required: false
default: False
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
    required: false
    default: 'openshift3/ose-${component}:${version}'
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry pod. The default is set for 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
- A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
sepa
|
rator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
|
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
|
pedrox/meld
|
meld/vcview.py
|
Python
|
gpl-2.0
| 35,046
| 0.005364
|
### Copyright (C) 2002-2006 Stephen Kennedy <stevek@gnome.org>
### Copyright (C) 2010-2012 Kai Willadsen <kai.willadsen@gmail.com>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
### USA.
from __future__ import print_function
import atexit
import tempfile
import shutil
import os
import sys
from gettext import gettext as _
import gtk
import pango
from . import melddoc
from . import misc
from . import paths
from . import recent
from . import tree
from . import vc
from .ui import emblemcellrenderer
from .ui import gnomeglade
################################################################################
#
# Local Functions
#
################################################################################
def _commonprefix(files):
if len(files) != 1:
workdir = misc.commonpr
|
efix(files)
else:
workdir = os.path.dirname(files[0]) or "."
return workdir
def cleanup_temp():
temp_location = tempfile.gettempd
|
ir()
# The strings below will probably end up as debug log, and are deliberately
# not marked for translation.
for f in _temp_files:
try:
assert os.path.exists(f) and os.path.isabs(f) and \
os.path.dirname(f) == temp_location
os.remove(f)
except:
except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
print("File \"{0}\" not removed due to".format(f), except_str,
file=sys.stderr)
for f in _temp_dirs:
try:
assert os.path.exists(f) and os.path.isabs(f) and \
os.path.dirname(f) == temp_location
shutil.rmtree(f, ignore_errors=1)
except:
except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
print("Directory \"{0}\" not removed due to".format(f), except_str,
file=sys.stderr)
_temp_dirs, _temp_files = [], []
atexit.register(cleanup_temp)
################################################################################
#
# CommitDialog
#
################################################################################
class CommitDialog(gnomeglade.Component):
def __init__(self, parent):
gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"), "commitdialog")
self.parent = parent
self.widget.set_transient_for( parent.widget.get_toplevel() )
selected = parent._get_selected_files()
topdir = _commonprefix(selected)
selected = [ s[len(topdir):] for s in selected ]
self.changedfiles.set_text( ("(in %s) "%topdir) + " ".join(selected) )
self.widget.show_all()
def run(self):
self.previousentry.child.set_editable(False)
self.previousentry.set_active(0)
self.textview.grab_focus()
buf = self.textview.get_buffer()
buf.place_cursor( buf.get_start_iter() )
buf.move_mark( buf.get_selection_bound(), buf.get_end_iter() )
response = self.widget.run()
msg = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), 0)
if response == gtk.RESPONSE_OK:
self.parent._command_on_selected( self.parent.vc.commit_command(msg) )
if len(msg.strip()):
self.previousentry.prepend_text(msg)
self.widget.destroy()
def on_previousentry_activate(self, gentry):
buf = self.textview.get_buffer()
buf.set_text( gentry.child.get_text() )
COL_LOCATION, COL_STATUS, COL_REVISION, COL_TAG, COL_OPTIONS, COL_END = \
list(range(tree.COL_END, tree.COL_END+6))
class VcTreeStore(tree.DiffTreeStore):
def __init__(self):
tree.DiffTreeStore.__init__(self, 1, [str] * 5)
################################################################################
# filters
################################################################################
entry_modified = lambda x: (x.state >= tree.STATE_NEW) or (x.isdir and (x.state > tree.STATE_NONE))
entry_normal = lambda x: (x.state == tree.STATE_NORMAL)
entry_nonvc = lambda x: (x.state == tree.STATE_NONE) or (x.isdir and (x.state > tree.STATE_IGNORED))
entry_ignored = lambda x: (x.state == tree.STATE_IGNORED) or x.isdir
################################################################################
#
# VcView
#
################################################################################
class VcView(melddoc.MeldDoc, gnomeglade.Component):
# Map action names to VC commands and required arguments list
action_vc_cmds_map = {
"VcCompare": ("diff_command", ()),
"VcCommit": ("commit_command", ("",)),
"VcUpdate": ("update_command", ()),
"VcAdd": ("add_command", ()),
"VcResolved": ("resolved_command", ()),
"VcRemove": ("remove_command", ()),
"VcRevert": ("revert_command", ()),
}
state_actions = {
"flatten": ("VcFlatten", None),
"modified": ("VcShowModified", entry_modified),
"normal": ("VcShowNormal", entry_normal),
"unknown": ("VcShowNonVC", entry_nonvc),
"ignored": ("VcShowIgnored", entry_ignored),
}
def __init__(self, prefs):
melddoc.MeldDoc.__init__(self, prefs)
gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"), "vcview")
actions = (
("VcCompare", gtk.STOCK_DIALOG_INFO, _("_Compare"), None, _("Compare selected"), self.on_button_diff_clicked),
("VcCommit", "vc-commit-24", _("Co_mmit"), None, _("Commit"), self.on_button_commit_clicked),
("VcUpdate", "vc-update-24", _("_Update"), None, _("Update"), self.on_button_update_clicked),
("VcAdd", "vc-add-24", _("_Add"), None, _("Add to VC"), self.on_button_add_clicked),
("VcRemove", "vc-remove-24", _("_Remove"), None, _("Remove from VC"), self.on_button_remove_clicked),
("VcResolved", "vc-resolve-24", _("_Resolved"), None, _("Mark as resolved for VC"), self.on_button_resolved_clicked),
("VcRevert", gtk.STOCK_REVERT_TO_SAVED, None, None, _("Revert to original"), self.on_button_revert_clicked),
("VcDeleteLocally", gtk.STOCK_DELETE, None, None, _("Delete locally"), self.on_button_delete_clicked),
)
toggleactions = (
("VcFlatten", gtk.STOCK_GOTO_BOTTOM, _("_Flatten"), None, _("Flatten directories"), self.on_button_flatten_toggled, False),
("VcShowModified","filter-modified-24", _("_Modified"), None, _("Show modified"), self.on_filter_state_toggled, False),
("VcShowNormal", "filter-normal-24", _("_Normal"), None, _("Show normal"), self.on_filter_state_toggled, False),
("VcShowNonVC", "filter-nonvc-24", _("Non _VC"), None, _("Show unversioned files"), self.on_filter_state_toggled, False),
("VcShowIgnored", "filter-ignored-24", _("Ignored"), None, _("Show ignored files"), self.on_filter_state_toggled, False),
)
self.ui_file = paths.ui_dir("vcview-ui.xml")
self.actiongroup = gtk.ActionGroup('VcviewActions')
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggleactions)
for acti
|
googlefonts/nototools
|
nototools/unicode_data.py
|
Python
|
apache-2.0
| 57,418
| 0.000871
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bleeding-edge version of Unicode Character Database.
Provides an interface similar to Python's own unicodedata package, but with
the bleeding-edge data. The implementation is not efficient at all; it is
done this way purely for ease of use. The data comes from a bleeding-edge
version of the Unicode Standard that is not yet published, so it is
expected to be unstable and sometimes inconsistent.
"""
__author__ = (
"roozbeh@google.com (Roozbeh Pournader) and " "cibu@google.com (Cibu Johny)"
)
import codecs
import collections
import os
from os import path
import re
from nototools.py23 import unichr, unicode, basestring
try:
import unicodedata2 as unicodedata # Unicode 8 compliant native lib
except ImportError:
import unicodedata # Python's internal library
from nototools import tool_utils # parse_int_ranges
# Update this when we update the base version data we use
UNICODE_VERSION = 14.0
_data_is_loaded = False
_property_value_aliases_data = {}
_character_names_data = {}
_general_category_data = {}
_combining_class_data = {}
_decomposition_data = {}
_bidi_mirroring_characters = set()
_script_data = {}
_script_extensions_data = {}
_block_data = {}
_block_range = {}
_block_names = []
_age_data = {}
_bidi_mirroring_glyph_data = {}
_core_properties_data = {}
_indic_positional_data = {}
_indic_syllabic_data = {}
_defined_characters = set()
_script_code_to_long_name = {}
_folded_script_name_to_code = {}
_lower_to_upper_case = {}
# emoji data
_presentation_default_emoji = None
_presentation_default_text = None
_emoji_modifier_base = None
_emoji = None
_emoji_variants = None
_emoji_variants_proposed = None
# non-emoji variant data
_variant_data = None
_variant_data_cps = None
# proposed emoji
_proposed_emoji_data = None
_proposed_emoji_data_cps = None
# emoji sequences
_emoji_sequence_data = None
_emoji_non_vs_to_canonical = None
_emoji_group_data = None
# nameslist/namealiases
_nameslist_see_also = None
_namealiases_alt_names = None
def load_data():
"""Loads the data files needed for the module.
Could be used by processes that care about controlling when the data is
loaded. Otherwise, data will be loaded the first time it's needed.
"""
global _data_is_loaded
|
if not _data_is_loaded:
_load_property_value_aliases_txt()
_load_unicode_data_
|
txt()
_load_scripts_txt()
_load_script_extensions_txt()
_load_blocks_txt()
_load_derived_age_txt()
_load_derived_core_properties_txt()
_load_bidi_mirroring_txt()
_load_indic_data()
_load_emoji_data()
_load_emoji_sequence_data()
_load_unicode_emoji_variants()
_load_variant_data()
_load_proposed_emoji_data()
_load_nameslist_data()
_load_namealiases_data()
_data_is_loaded = True
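# Editorial note: every accessor below calls load_data() before touching the
# tables, so importing this module stays cheap and the data files are parsed
# only on the first real query.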
def name(char, *args):
"""Returns the name of a character.
Raises a ValueError exception if the character is undefined, unless an
extra argument is given, in which case it will return that argument.
"""
if isinstance(char, int):
char = unichr(char)
    # First try to get the name from unicodedata, which is faster and supports
# CJK and Hangul automatic names
try:
return unicodedata.name(char)
except ValueError as val_error:
cp = ord(char)
load_data()
if cp in _character_names_data:
return _character_names_data[cp]
elif (cp,) in _emoji_sequence_data:
return _emoji_sequence_data[(cp,)][0]
elif args:
return args[0]
else:
            raise ValueError('no name for "%0x"' % ord(char))
def _char_to_int(char):
"""Converts a potential character to its scalar value."""
if type(char) in [str, type(u"")]:
return ord(char)
else:
return char
def derived_props():
load_data()
return frozenset(_core_properties_data.keys())
def chars_with_property(propname):
load_data()
return frozenset(_core_properties_data[propname])
def category(char):
"""Returns the general category of a character."""
load_data()
char = _char_to_int(char)
try:
return _general_category_data[char]
except KeyError:
return "Cn" # Unassigned
def combining(char):
"""Returns the canonical combining class of a character."""
load_data()
char = _char_to_int(char)
try:
return _combining_class_data[char]
except KeyError:
return 0
def to_upper(char):
"""Returns the upper case for a lower case character.
This is not full upper casing, but simply reflects the 1-1
mapping in UnicodeData.txt."""
load_data()
cp = _char_to_int(char)
try:
if _general_category_data[cp] == "Ll":
return unichr(_lower_to_upper_case[cp])
except KeyError:
pass
return char
def canonical_decomposition(char):
"""Returns the canonical decomposition of a character as a Unicode string."""
load_data()
char = _char_to_int(char)
try:
return _decomposition_data[char]
except KeyError:
return u""
def script(char):
"""Returns the script property of a character as a four-letter code."""
load_data()
char = _char_to_int(char)
try:
return _script_data[char]
except KeyError:
return "Zzzz" # Unknown
def script_extensions(char):
"""Returns the script extensions property of a character.
The return value is a frozenset of four-letter script codes.
"""
load_data()
char = _char_to_int(char)
try:
return _script_extensions_data[char]
except KeyError:
return frozenset([script(char)])
def block(char):
"""Returns the block property of a character."""
load_data()
char = _char_to_int(char)
try:
return _block_data[char]
except KeyError:
return "No_Block"
def block_range(block):
"""Returns a range (first, last) of the named block."""
load_data()
return _block_range[block]
def block_chars(block):
"""Returns a frozenset of the cps in the named block."""
load_data()
first, last = _block_range[block]
return frozenset(range(first, last + 1))
def block_names():
"""Returns the names of the blocks in block order."""
load_data()
return _block_names[:]
def age(char):
"""Returns the age property of a character as a string.
Returns None if the character is unassigned."""
load_data()
char = _char_to_int(char)
try:
return _age_data[char]
except KeyError:
return None
# Uniscribe treats these ignorables (Hangul fillers) as spacing.
UNISCRIBE_USED_IGNORABLES = frozenset([0x115F, 0x1160, 0x3164, 0xFFA0])
def is_default_ignorable(char):
"""Returns true if the character has the Default_Ignorable property."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _core_properties_data["Default_Ignorable_Code_Point"]
def default_ignorables():
load_data()
return frozenset(_core_properties_data["Default_Ignorable_Code_Point"])
def is_defined(char):
"""Returns true if the character is defined in the Unicode Standard."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _defined_characters
def is_private_use(char):
"""Returns true if the characters is a private use character."""
return category(char) == "Co"
def mirrored(char):
"""Returns 1 if the characters is bidi mi
|
heprom/pymicro
|
pymicro/core/tests/test_samples.py
|
Python
|
mit
| 19,023
| 0.00021
|
import unittest
import os
import numpy as np
import math
from tables import IsDescription, Int32Col, Float32Col
from py
|
micro.core.samples import SampleData
from BasicTools.Containers.ConstantRectilinearMesh import ConstantRectilinearMesh
import BasicTools.Containers.UnstructuredMeshCreationTools as UMCT
from config import PYMICRO_EXAMPLES_DATA_DIR
class TestGrainData(IsDescription):
"""
Description class speci
|
fying structured storage for tests
"""
    idnumber = Int32Col()  # Signed 32-bit integer
volume = Float32Col() # float
center = Float32Col(shape=(3,)) # float
class TestDerivedClass(SampleData):
""" Class to test the datamodel specification mechanism, via definition
of classes derived from SampleData
"""
def minimal_data_model(self):
"""
        Specify the minimal contents of the hdf5 (Group names, paths, and
group types) in the form of a dictionary {content:Location}
Extends SampleData Class _minimal_data_model class
"""
# create a dtype to create a structured array
Descr = np.dtype([('density', np.float32), ('melting_Pt', np.float32),
('Chemical_comp', 'S', 30)])
# create data model description dictionaries
minimal_content_index_dic = {'Image_data': '/CellData',
'grain_map': '/CellData/grain_map',
'Grain_data': '/GrainData',
'GrainDataTable': ('/GrainData/'
'GrainDataTable'),
'Crystal_data': '/CrystalStructure',
'lattice_params': ('/CrystalStructure'
'/LatticeParameters'),
'lattice_props': ('/CrystalStructure'
'/LatticeProps'),
'grain_names': '/GrainData/GrainNames',
'Mesh_data': '/MeshData'}
minimal_content_type_dic = {'Image_data': '3DImage',
'grain_map': 'field_array',
'Grain_data': 'Group',
                                    'GrainDataTable': TestGrainData,
'Crystal_data': 'Group',
'lattice_params': 'data_array',
'lattice_props': Descr,
'grain_names': 'string_array',
'Mesh_data': 'Mesh'
}
return minimal_content_index_dic, minimal_content_type_dic
class SampleDataTests(unittest.TestCase):
def setUp(self):
print('testing the SampleData class')
# Create data to store into SampleData instances
# dataset sample_name and description
self.sample_name = 'test_sample'
self.sample_description = """
This is a test dataset created by the SampleData class unit tests.
"""
# Create a mesh of an octahedron with 6 triangles
self.mesh_nodes = np.array([[-1., -1., 0.],
[-1., 1., 0.],
[1., 1., 0.],
[1., -1., 0.],
[0., 0., 1.],
[0., 0., -1.]])
self.mesh_elements = np.array([[0, 1, 4],
[0, 1, 5],
[1, 2, 4],
[1, 2, 5],
[2, 3, 4],
[2, 3, 5],
[3, 0, 4],
[3, 0, 5]])
# Create 2 fields 'shape functions' for the 2 nodes at z=+/-1
self.mesh_shape_f1 = np.array([0., 0., 0., 0., 1., 0.])
self.mesh_shape_f2 = np.array([0., 0., 0., 0., 0., 1.])
# Create 2 element wise fields
self.mesh_el_Id = np.array([0., 1., 2., 3., 4., 5., 6., 7.])
self.mesh_alternated = np.array([1., 1., -1., -1., 1., 1., -1., -1.])
# Create a binary 3D Image
self.image = np.zeros((10, 10, 10), dtype='int16')
self.image[:, :, :5] = 1
self.image_origin = np.array([-1., -1., -1.])
self.image_voxel_size = np.array([0.2, 0.2, 0.2])
# Create a data array
self.data_array = np.array([math.tan(x) for x in
np.linspace(-math.pi/4, math.pi/4, 51)])
# Create numpy dtype and structure array
# WARNING: Pytables transforms all strings into bytes
# --> use only bytes in dtypes
self.dtype1 = np.dtype([('density', np.float32),
('melting_Pt', np.float32),
('Chemical_comp', 'S', 30)])
self.struct_array1 = np.array([(6.0, 1232, 'Cu2O'),
(5.85, 2608, 'ZrO2')],
dtype=self.dtype1)
# Test file pathes
self.filename = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'test_sampledata')
self.derived_filename = self.filename+'_derived'
self.reference_file = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'test_sampledata_ref')
def test_create_sample(self):
"""Test creation of a SampleData instance/file and data storage."""
sample = SampleData(filename=self.filename,
overwrite_hdf5=True, verbose=False,
sample_name=self.sample_name,
sample_description=self.sample_description)
self.assertTrue(os.path.exists(self.filename + '.h5'))
self.assertTrue(os.path.exists(self.filename + '.xdmf'))
self.assertEqual(sample.get_sample_name(), self.sample_name)
self.assertEqual(sample.get_description(), self.sample_description)
# Add mesh data into SampleData dataset
mesh = UMCT.CreateMeshOfTriangles(self.mesh_nodes, self.mesh_elements)
# Add mesh node tags
mesh.nodesTags.CreateTag('Z0_plane', False).SetIds([0, 1, 2, 3])
mesh.nodesTags.CreateTag('out_of_plane', False).SetIds([4, 5])
# Add element tags
mesh.GetElementsOfType('tri3').GetTag('Top').SetIds([0, 2, 4, 6])
mesh.GetElementsOfType('tri3').GetTag('Bottom').SetIds([1, 3, 5, 7])
# Add mesh node fields
mesh.nodeFields['Test_field1'] = self.mesh_shape_f1
mesh.nodeFields['Test_field2'] = self.mesh_shape_f2
# Add mesh element fields
mesh.elemFields['Test_field3'] = self.mesh_el_Id
mesh.elemFields['Test_field4'] = self.mesh_alternated
sample.add_mesh(mesh, meshname='test_mesh', indexname='mesh',
location='/', bin_fields_from_sets=True)
# Add image data into SampleData dataset
image = ConstantRectilinearMesh(dim=len(self.image.shape))
image.SetDimensions(self.image.shape)
image.SetOrigin(self.image_origin)
image.SetSpacing(self.image_voxel_size)
image.elemFields['test_image_field'] = self.image
sample.add_image(image, imagename='test_image', indexname='image',
location='/')
# Add new group and array to SampleData dataset
sample.add_group(groupname='test_group', location='/', indexname='group')
sample.add_data_array(location='group', name='test_array',
array=self.data_array, indexname='array')
# close sample data instance
del sample
# reopen sample data instance
sample = SampleData(filename=self.filename)
# test mesh geometry data recovery
mesh_nodes = sample.get_mesh_nodes(meshname='mesh', as_numpy=True)
self.assertTrue(np.all(mesh_nodes == self.mesh_n
|
xyos/horarios
|
horarios/helpers.py
|
Python
|
mit
| 5,854
| 0.005296
|
import json
import logging
import httplib
import urllib2
from django.core.exceptions import ValidationError
from django.conf import settings
siaUrl=settings.SIA_URL
import re
import string
def sanitize_search_term(term):
    # Replace all punctuation with spaces.
allowed_punctuation = set(['&', '|', '"', "'"])
all_punctuation = set(string.punctuation)
punctuation = "".join(all_punctuation - allowed_punctuation)
term = re.sub(r"[{}]+".format(re.escape(punctuation)), " ", \
term)
    # Replace all double quotes with single quotes.
term = term.replace('"', "'")
term = re.sub(r"[']+", "'", term)
# Create regex to find strings within quotes.
quoted_strings_re = re.compile(r"('[^']*')")
space_between_words_re = re.compile(r'([^ &|])[ ]+([^ &|])')
spaces_surrounding_letter_re = re.compile(r'[ ]+([^ &|])[ ]+')
multiple_operator_re = re.compile(r"[ &]+(&|\|)[ &]+")
tokens = quoted_strings_re.split(term)
processed_tokens =
|
[]
for tok
|
en in tokens:
# Remove all surrounding whitespace.
token = token.strip()
if token in ['', "'"]:
continue
if token[0] != "'":
# Surround single letters with &'s
token = spaces_surrounding_letter_re.sub(r' & \1 & ', token)
# Specify '&' between words that have neither | or & specified.
token = space_between_words_re.sub(r'\1 & \2', token)
# Add a prefix wildcard to every search term.
token = re.sub(r'([^ &|]+)', r'\1:*', token)
processed_tokens.append(token)
term = " & ".join(processed_tokens)
# Replace ampersands or pipes surrounded by ampersands.
term = multiple_operator_re.sub(r" \1 ", term)
# Escape single quotes
return term.replace("'", "''")
class SIA:
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
cache = CacheManager(**parse_cache_config_options({
'cache.type': 'file',
'cache.data_dir': '/tmp/horariossiacache/data',
'cache.lock_dir': '/tmp/horariossiacache/lock',
'cache.regions': 'short_term, long_term',
'cache.short_term.type': 'memory',
'cache.short_term.expire': '3600',
'cache.long_term.type': 'file',
'cache.long_term.expire': '86400'
}))
def existsSubject(this,name,level):
return this.queryNumSubjectsWithName(name,level)>0
def queryNumSubjectsWithName(this,name,level):
data = json.dumps({"method": "buscador.obtenerAsignaturas", "params": [name, level, "", level, "", "", 1, 1]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
        result = 0  # avoid an unbound local if the request below fails
        try:
f = urllib2.urlopen(req)
result = json.loads(f.read())["result"]["totalAsignaturas"]
f.close()
        except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
return result
@cache.region('short_term')
def querySubjectsByName(this,name,level,maxRetrieve):
data = json.dumps({"method": "buscador.obtenerAsignaturas", "params": [name, level, "", level, "", "", 1, maxRetrieve]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
        result = None
        try:
f = urllib2.urlopen(req)
result = json.loads(f.read())
f.close()
        except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
return result["result"]["asignaturas"]["list"]
@cache.region('short_term')
def queryGroupsBySubjectCode(this,code):
data = json.dumps({"method": "buscador.obtenerGruposAsignaturas", "params": [code, "0"]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
result = None
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())
f.close()
except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
if result:
return result["result"]["list"]
else:
return []
@staticmethod
@cache.region('short_term')
def queryGroupsProfessions(code,group):
import re
        html = ''  # default if every attempt below fails
        while True:
try:
f = urllib2.urlopen(siaUrl + "/service/groupInfo.pub?cod_asignatura=" + str(code) + "&grp=" + str(group))
html = f.read().decode("ISO-8859-1")
break
except urllib2.URLError, e:
if e.code == 403:
pass
else:
logging.warning(str(e))
break
except Exception, e:
logging.warning(str(e))
break
relevantSection = re.compile(r'Los planes de estudio para los cuales se ofrece esta asignatura son:</p><div><ul class="modulelist">(.*)</ul></div>').findall(html)
professions = []
if (len(relevantSection)>0):
professionsHtml = re.compile('<li><p>(.*?)</p></li>').findall(relevantSection[0])
for i in professionsHtml:
data = i.split("-")
professions.append((data[0].strip(),re.compile('<em>(.*)</em>').findall("".join(data[1:]))[0]))
return professions
|
mozilla/popcorn_maker
|
vendor-local/lib/python/whoosh/filedb/filewriting.py
|
Python
|
bsd-3-clause
| 21,271
| 0.000705
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
from bisect import bisect_right
from whoosh.fields import UnknownFieldError
from whoosh.store import LockError
from whoosh.support.filelock import try_for
from whoosh.support.externalsort import SortingPool
from whoosh.util import fib
from whoosh.writing import IndexWriter, IndexingError
# Merge policies
# A merge policy is a callable that takes the SegmentWriter object and the
# current segment list (not including the segment being written), and
# returns an updated segment list (again not including the segment being
# written).
def NO_MERGE(writer, segments):
"""This policy does not merge any existing segments.
"""
return segments
def MERGE_SMALL(writer, segments):
"""This policy merges small segments, where "small" is defined using a
heuristic based on the fibonacci sequence.
"""
from whoosh.filedb.filereading import SegmentReader
newsegments = []
sorted_segment_list = sorted(segments, key=lambda s: s.doc_count_all())
total_docs = 0
for i, seg in enumerate(sorted_segment_list):
count = seg.doc_count_all()
if count > 0:
total_docs += count
if total_docs < fib(i + 5):
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
else:
newsegments.append(seg)
return newsegments
def OPTIMIZE(writer, segments):
"""This policy merges all existing segments.
"""
from whoosh.filedb.filereading import SegmentReader
for seg in segments:
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
return []
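# Sketch of a custom policy (editorial addition, not part of Whoosh). The
# callable interface is (writer, segments) -> segments, so a policy that
# folds in every segment below an assumed 1000-document threshold could be
# written as:
def MERGE_UNDER_THRESHOLD(writer, segments):
    from whoosh.filedb.filereading import SegmentReader
    kept = []
    for seg in segments:
        if seg.doc_count_all() < 1000:  # assumed threshold, not a Whoosh default
            # Fold the small segment's postings into the writer, exactly as
            # the built-in policies above do.
            reader = SegmentReader(writer.storage, writer.schema, seg)
            writer.add_reader(reader)
            reader.close()
        else:
            kept.append(seg)
    return kept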
class PostingPool(SortingPool):
# Subclass whoosh.support.externalsort.SortingPool to use knowledge of
# postings to set run size in bytes instead of item
|
s
def __init__(self, limitmb=128, **kwargs):
SortingPool.__init__(self, **kwargs)
self.limit = limitmb * 1024 * 1024
self.currentsize = 0
def add(self, item):
# item = (fieldname, text, docnum, weight, valuestring)
size = (28 + 4 * 5 # tuple = 28 + 4 * length
+ 21 + len(item[0]) # fieldname = str = 21 + length
+ 26 + len(item[1]) * 2 # text = unicode = 26 + 2 * length
|
+ 18 # docnum = long = 18
+ 16 # weight = float = 16
+ 21 + len(item[4] or '')) # valuestring
self.currentsize += size
if self.currentsize > self.limit:
self.save()
self.current.append(item)
def iter_postings(self):
# This is just an alias for items() to be consistent with the
# iter_postings()/add_postings() interface of a lot of other classes
return self.items()
def save(self):
SortingPool.save(self)
self.currentsize = 0
def renumber_postings(reader, startdoc, docmap):
for fieldname, text, docnum, weight, value in reader.iter_postings():
newdoc = docmap[docnum] if docmap else startdoc + docnum
yield (fieldname, text, newdoc, weight, value)
# Writer object
class SegmentWriter(IndexWriter):
def __init__(self, ix, poolclass=None, timeout=0.0, delay=0.1, _lk=True,
limitmb=128, docbase=0, codec=None, compound=True, **kwargs):
# Lock the index
self.writelock = None
if _lk:
self.writelock = ix.lock("WRITELOCK")
if not try_for(self.writelock.acquire, timeout=timeout,
delay=delay):
raise LockError
if codec is None:
from whoosh.codec import default_codec
codec = default_codec()
self.codec = codec
# Get info from the index
self.storage = ix.storage
self.indexname = ix.indexname
info = ix._read_toc()
self.generation = info.generation + 1
self.schema = info.schema
self.segments = info.segments
self.docnum = self.docbase = docbase
self._setup_doc_offsets()
# Internals
self.compound = compound
poolprefix = "whoosh_%s_" % self.indexname
self.pool = PostingPool(limitmb=limitmb, prefix=poolprefix)
newsegment = self.newsegment = codec.new_segment(self.storage,
self.indexname)
self.is_closed = False
self._added = False
# Set up writers
self.perdocwriter = codec.per_document_writer(self.storage, newsegment)
self.fieldwriter = codec.field_writer(self.storage, newsegment)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.newsegment)
def _setup_doc_offsets(self):
self._doc_offsets = []
base = 0
for s in self.segments:
self._doc_offsets.append(base)
base += s.doc_count_all()
def _check_state(self):
if self.is_closed:
raise IndexingError("This writer is closed")
def add_field(self, fieldname, fieldspec, **kwargs):
self._check_state()
if self._added:
raise Exception("Can't modify schema after adding data to writer")
super(SegmentWriter, self).add_field(fieldname, fieldspec, **kwargs)
def remove_field(self, fieldname):
self._check_state()
if self._added:
raise Exception("Can't modify schema after adding data to writer")
super(SegmentWriter, self).remove_field(fieldname)
def _document_segment(self, docnum):
#Returns the index.Segment object containing the given document
#number.
offsets = self._doc_offsets
if len(offsets) == 1:
return 0
return bisect_right(offsets, docnum) - 1
def _segment_and_docnum(self, docnum):
#Returns an (index.Segment, segment_docnum) pair for the segment
#containing the given document number.
segmentnum = self._document_segment(docnum)
offset = self._doc_offsets[segmentnum]
segment = self.segments[segmentnum]
return segment, docnum - offset
def has_deletions(self):
"""
Returns True if this index has documents that are marked deleted but
haven't been optimized out of the index yet.
"""
return any(s.has_deletions() for s in self.segments)
def delete_document(self, docnum, delete=True):
self._check_state()
if docnum >= sum(seg.doccount for seg in self.segments):
raise IndexingError("No documen
|
dmilith/SublimeText3-dmilith
|
Packages/Debugger/modules/libs/pywinpty/tests/test_import.py
|
Python
|
mit
| 909
| 0.0011
|
import sublime
import unittest
import os
import sys
class TestImport(unittest.TestCase):
mpath = None
@classmethod
def setUpClass(cls):
basedir = os.path.dirname(__file__)
mpath = os.path.normpath(os.path.join(
basedir, "..", "st3_{}_{}".format(sublime.platform(), sublime.arch())))
if mpath not in sys.path:
cls.mpath = mpath
sys.path.append(mpath)
def test_import(self):
from winpty import PtyProcess
self.assertTrue("winpty" in sys.modules)
proc = PtyProcess.spawn('cmd.exe')
self.assertTrue(proc.isalive())
proc.terminate(True)
@
|
classmethod
def tearDownClass(cls):
if not cls.mpath:
return
mpath = cls.mpath
if mpath in sys.path:
|
sys.path.remove(mpath)
if "winpty" in sys.modules:
del sys.modules["winpty"]
|
rmak/splunk-sdk-python
|
examples/handlers/handler_urllib2.py
|
Python
|
apache-2.0
| 1,492
| 0.002681
|
#!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you
|
may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Example of a urllib2 based HTTP request handler."""
from pprint import pprint
from StringIO import StringIO
import sys
import urllib2
import splunk.client as client
import utils
def request(url, message, **kwargs):
method = message['method'].lower()
data = message.get('body', "") if method == 'post' else None
headers = dict(message.get('headers', []))
context = urllib2.Request(url, data, headers)
try:
response = urllib2.urlopen(context)
except urllib2.HTTPError, response:
pass # Propagate HTTP errors via the returned response message
return {
'status': response.code,
'reason': response.msg,
'headers': response.info().dict,
'body': StringIO(response.read())
}
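# A sketch of the handler contract shown above (not part of the original file;
# the URL and token below are placeholders): the SDK hands `request` a message
# dict and expects a response dict back.
#
#   message = {'method': 'GET', 'headers': [('Authorization', 'Splunk <token>')]}
#   reply = request('https://localhost:8089/services/apps/local', message)
#   # reply['status'] -> 200, reply['body'].read() -> the Atom XML payload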
opts = utils.parse(sys.argv[1:], {}, ".splunkrc")
service = client.connect(handler=request, **opts.kwargs)
pprint(service.apps.list())
|
kanarelo/reportlab
|
tests/test_lib_utils.py
|
Python
|
bsd-3-clause
| 5,924
| 0.006752
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for reportlab.lib.utils
"""
__version__=''' $Id$ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import os, time, sys
import reportlab
import unittest
from reportlab.lib import colors
from reportlab.lib.utils import recursiveImport, recursiveGetAttr, recursiveSetAttr, rl_isfile, \
isCompactDistro, isPy3
def _rel_open_and_read(fn):
from reportlab.lib.utils import open_and_read
from reportlab.lib.testutils import testsFolder
cwd = os.getcwd()
os.chdir(testsFolder)
try:
return open_and_read(fn)
finally:
os.chdir(cwd)
class ImporterTestCase(unittest.TestCase):
"Test import utilities"
count = 0
def setUp(self):
from reportlab.lib.utils import get_rl_tempdir
s = repr(int(time.time())) + repr(self.count)
self.__class__.count += 1
self._tempdir = get_rl_tempdir('reportlab_test','tmp_%s' % s)
if not os.path.isdir(self._tempdir):
os.makedirs(self._tempdir,0o700)
_testmodulename = os.path.join(self._tempdir,'test_module_%s.py' % s)
f = open(_testmodulename,'w')
f.write('__all__=[]\n')
f.close()
if sys.platform=='darwin' and isPy3:
time.sleep(0.3)
self._testmodulename = os.path.splitext(os.path.basename(_testmodulename))[0]
def tearDown(self):
from shutil import rmtree
rmtree(self._tempdir,1)
def test1(self):
"try stuff known to be in the path"
m1 = recursiveImport('reportlab.pdfgen.canvas')
import reportlab.pdfgen.canvas
assert m1 == reportlab.pdfgen.canvas
def test2(self):
"try under a well known directory NOT on the path"
from reportlab.lib.testutils import testsFolder
D = os.path.join(testsFolder,'..','tools','pythonpoint')
fn = os.path.join(D,'stdparser.py')
if rl_isfile(fn) or rl_isfile(fn+'c') or rl_isfile(fn+'o'):
m1 = recursiveImport('stdparser', baseDir=D)
def test3(self):
"ensure CWD is on the path"
try:
cwd = os.getcwd()
os.chdir(self._tempdir)
m1 = recursiveImport(self._testmodulename)
finally:
os.chdir(cwd)
def test4(self):
"ensure noCWD removes current dir from path"
try:
cwd = os.getcwd()
os.chdir(self._tempdir)
import sys
try:
del sys.modules[self._testmodulename]
except KeyError:
pass
self.assertRaises(ImportError,
recursiveImport,
self._testmodulename,
noCWD=1)
finally:
os.chdir(cwd)
def test5(self):
"recursive attribute setting/getting on modules"
import reportlab.lib.units
inch = recursiveGetAttr(reportlab, 'lib.units.inch')
assert inch == 72
recursiveSetAttr(reportlab, 'lib.units.cubit', 18*inch)
cubit = recursiveGetAttr(reportlab, 'lib.units.cubit')
assert cubit == 18*inch
def test6(self):
"recursive attribute setting/getting on drawings"
from reportlab.graphics.charts.barcharts import sampleH1
drawing = sampleH1()
recursiveSetAttr(drawing, 'barchart.valueAxis.valueMax', 72)
theMax = recursiveGetAttr(drawing, 'barchart.valueAxis.valueMax')
assert theMax == 72
def test7(self):
"test open and read of a simple relative file"
b = _rel_open_and_read('../docs/images/Edit_Prefs.gif')
def test8(self):
"test open and read of a relative file: URL"
b = _rel_open_and_read('file:../docs/images/Edit_Prefs.gif')
def test9(self):
"test open and read of an http: URL"
from reportlab.lib.utils import open_and_read
b = open_and_read('http://www.reportlab.com/rsrc/encryption.gif')
def test10(self):
"test open and read of a simple relative file"
from reportlab.lib.utils import open_and_read, getBytesIO
b = getBytesIO(_rel_open_and_read('../docs/images/Edit_Prefs.gif'))
b = open_and_read(b)
def test11(self):
"test open and read of an RFC 2397 data URI with base64 encoding"
result = _rel_open_and_read('data:image/gif;base64,R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs=')
self.assertEquals(result,b'GIF87a\x01\x00\x01\x00\x80\x00\x00\xff\xff\xff\xff\xff\xff,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
def test12(self):
"test open and read of an RFC 2397 data URI without an encoding"
        result = _rel_open_and_read('data:text/plain;,Hello%20World')
self.assertEquals(result,b'Hello World')
def testRecursiveImportErrors(self):
"check we get useful error messages"
try:
m1 = recursiveImport('reportlab.pdfgen.brush')
self.fail("Imported a nonexistent module")
except ImportError as e:
self.assertIn('reportlab.pdfgen.brush',str(e))
try:
m1 = recursiveImport('totally.non.existent')
self.fail("Imported a nonexistent module")
except ImportError as e:
self.assertIn('totally',str(e))
try:
#import a module in the 'tests' directory with a bug
m1 = recursiveImport('unimportable')
self.fail("Imported a buggy module")
except Exception as e:
self.assertIn(reportlab.isPy3 and 'division by zero' or 'integer division or modulo by zero',str(e))
def makeSuite():
return makeSuiteForClasses(ImporterTestCase)
if __name__ == "__main__": #noruntests
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
uclouvain/OSIS-Louvain
|
features/steps/utils/pages.py
|
Python
|
agpl-3.0
| 3,065
| 0.000326
|
# ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
import pypom
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from features.pages.common import CommonPageMixin
from features.fields.fields import InputField, SelectField, ButtonField
class SearchEntityPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/entities/'
acronym = InputField(By.ID, 'id_acronym')
title = InputField(By.ID, 'id_title')
entity_type = SelectField(By.ID, "id_entity_type")
search = ButtonField(By.ID, "bt_submit_entity_search")
def find_acronym_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_entity_%d' % row).text
class SearchOrganizationPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/organizations/'
acronym = InputField(By.ID, 'id_acronym')
name = InputField(By.ID, 'id_name')
type = SelectField(By.ID, "id_type")
search = ButtonField(By.ID, "bt_submit_organization_search")
def find_acronym_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_organization_%d' % row).text
class SearchStudentPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/students/'
registration_id = InputField(By.ID, 'id_registration_id')
name = InputField(By.ID, 'id_name')
search = ButtonField(By.ID, "bt_submit_student_search")
def find_registration_id_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_student_%d' % row).text
def find_name_in_table(self):
names = []
row = 1
        while True:
            try:
                elt = self.find_element(By.ID, 'spn_student_name_%d' % row)
                names.append(elt.text)
                row += 1
            except NoSuchElementException:
                return names
|
micolous/tm1640-rpi
|
doc/source/conf.py
|
Python
|
gpl-3.0
| 8,144
| 0.007859
|
# -*- coding: utf-8 -*-
#
# tm1640-rpi documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 12 19:52:17 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/python/'))
# hack for readthedocs to cause it to run doxygen first
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
from subprocess import call
call('doxygen')
del call
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'breathe']
breathe_projects = {'tm1640-rpi': 'doxygen-xml/'}
breathe_default_project = 'tm1640-rpi'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tm1640-rpi'
copyright = u'2013, Michael Farrell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tm1640-rpidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tm1640-rpi.tex', u'tm1640-rpi Documentation',
u'Michael Farrell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
[u'Michael Farrell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
u'Michael Farrell', 'tm1640-rpi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'both'
|
snipsco/teleport
|
src/kubeconfig/kubectl_actions.py
|
Python
|
mit
| 2,287
| 0.020988
|
# encoding: utf-8
import json
import time
from kubectl_data import *
from kubectl_ports import *
from kubectl_wrapper import *
TMP_FILEPATH = '/tmp/'
def create_tmp_json(data, service_path):
with open(service_path, 'w') as out:
json.dump(data, out, indent=2)
def sub_start(service_name, data, kube_type):
filepath = TMP_FILEPATH + service_name + '-' + kube_type + '.json'
kube_data = data.get(kube_type, dict())
create_tmp_json(kube_data, filepath)
create(filepath)
def sub_stop(service_name, data, kube_type):
filepath = TMP_FILEPATH + service_name + '-' + kube_type + '.json'
kube_data = data.get(kube_type, dict())
create_tmp_json(kube_data, filepath)
delete(filepath)
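# Illustrative note (not part of the original file): for service_name='web'
# and kube_type='service', sub_start/sub_stop serialize data['service'] to
# '/tmp/web-service.json' and pass that path to create()/delete().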
'''
Actions
'''
def kubectl_used_ports(subdomain):
return get_used_ports(subdomain)
def kubectl_available_ports(subdomain):
return get_available_ports(subdomain)
def kubectl_register(filepath):
data = get_data_yaml(filepath)
register_data(data)
def kubectl_start(service_name):
data = get_data(service_name)
sub_start(service_name, data, 'service')
time.sleep(1)
sub_start(service_name, data, 'replicationcontroller')
def kubectl_stop(service_name):
data = get_data(service_name)
sub_stop(service_name, data, 'replicationcontroller')
sub_stop(service_name, data, 'service')
time.sleep(1)
def kubectl_list():
return get_all_names()
def kubectl_startall():
services = get_all_names()
for service in services:
kubectl_start(service)
def kubectl_status(ressources, all_namespaces):
return status(ressources, all_namespaces)
def kubectl_status_nodes():
return nodes()
def kubectl_logs(service_name, f):
pods = pods_name_from_label(service_name)
pods_list = filter(lambda x: x != '', pods.split('\n'))
if not pods_list:
print 'No pods found'
return
elif len(pods_list) > 1:
format_list = '\n'.join(pods_list) + '\n\nName: '
answer = raw_input('Multiple pods under this service, please choose one by selecting the name: \n' + format_list)
return logs(answer, f)
else:
pod_name = pods_list[0].split(' ')[0]
return logs(pod_name, f)
def kubectl_describe(service_name):
found_pods_and_exec_func(service_name, describe)
def kubectl_connect(service_name):
found_pods_and_exec_func(service_name, connect)
|
eduNEXT/edunext-platform
|
openedx/core/djangoapps/bookmarks/apps.py
|
Python
|
agpl-3.0
| 1,146
| 0.001745
|
"""
Configuration for bookmarks Django app
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from edx_django_utils.plugins import PluginSettings, PluginURLs
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
class BookmarksConfig(AppConfig):
"""
Configuration class for bookmarks Django app
"""
name = 'openedx.core.djangoapps.bookmarks'
verbose_name = _("Bookmarks")
plugin_app = {
PluginURLs.CONFIG: {
ProjectType.LMS: {
PluginURLs.NAMESPACE: '',
PluginURLs.REGEX: '^api/bookmarks/',
PluginURLs.RELATIVE_PATH: 'urls',
}
},
PluginSettings.CONFIG: {
ProjectType.LMS: {
                SettingsType.PRODUCTION: {PluginSettings.RELATIVE_PATH: 'settings.production'},
SettingsType.COMMON: {PluginSettings.RELATIVE_PATH: 'settings.common'},
}
}
}
def ready(self):
# Register the signals handled by bookmarks.
from . import signals # lint-amnesty, pylint: disable=unused-import
|
wiredfool/fmod
|
fmod/controllers/ping.py
|
Python
|
gpl-2.0
| 5,311
| 0.046131
|
import logging
from pylons import config, request, response, session, tmpl_context as c
from pylons.controllers.util import abort
from fmod.lib.base import BaseController, render
from fmod import model
from sqlalchemy import desc
log = logging.getLogger(__name__)
from hashlib import md5
import time, datetime
#useful for this case.
from fmod.model import Ping, ImageHistory
from flickrapi import FlickrAPI
class PingController(BaseController):
def index(self):
c.results=[]
c.username = session.get('user')
c.fl_mod = session.get('mod',False)
images = {}
flSave = False
for ping in Ping.query().filter(Ping.fl_decided==False).order_by(Ping.id):
if not images.get(ping.image):
img = ping.Image_fromPing()
if img.in_pool():
images[ping.image] = True
c.results.append(ping)
if len(c.results) >= 2:
break
else:
flSave=True
ping.fl_decided=True
if flSave: ping.commit()
return render('ping.mako')
def more(self, id=None):
# id will be something like d_ping_[ping.id]
# so, I want to get a ping where id > that one.
pid = id.split('_')[-1]
try:
pid = int(pid)
except:
log.debug("couldn't identify the ping %s "%id)
return ""
c.username = session.get('user')
c.fl_mod = session.get('mod',False)
filter_images = dict([(ping.image,True) for ping in
Ping.query().filter(Ping.fl_decided==False).filter(Ping.id<=pid)])
for ping in Ping.query().filter(Ping.fl_decided==False).filter(Ping.id>pid).or
|
der_by(Ping.id):
            if ping.image not in filter_images:
img = ping.Image_fromPing()
if img.in_pool():
c.ping=ping
c.image=ping.image
c.atts = img.all_atts()
return render('one_ping.mako')
else:
ping.fl_decided=True
ping.commit()
def _fmtTime(self, t=None):
if t!= None and hasattr(t, 'timetuple'):
t = time.mktime(t.timetuple())
return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(t))
def rss(self):
response.charset='utf8'
response.headers['content-type'] = 'text/xml; charset=UTF-8'
c.items=[]
images = {}
for ping in Ping.query().filter(Ping.fl_decided==False).order_by(desc(Ping.id)):
if not images.get(ping.image):
img = ping.Image_fromPing()
if img.in_pool():
images[ping.image] = True
img.all_atts()
c.items.append((ping,img))
if len(c.results) >= 20:
break
c.fmtTime = self._fmtTime
return render('rss.mako')
def ping(self):
log.debug('In Ping')
params = {'nsid':'nsid', # the pinging user, this is static.
'uid':'username', # our userid
'id' :'image', # image id
'own':'owner', # image owner
'sec':'secret', # image secret, from flickr
'con':'context', # context - in group pool
}
# 's':None # signature
# check sig --
nsid = request.params.get('nsid')
if nsid:
u = model.User.get_byNsid(nsid)
else:
u = model.User.get_byName(request.params.get('uid'))
if not u:
log.debug('user not found for ping: %s'%request.query_string)
return ''
log.debug(request.query_string)
log.debug(request.query_string[:-35]+u.secret)
log.debug(request.params.get('s'))
log.debug(md5(request.query_string[:-35]+u.secret).hexdigest().lower())
if md5(request.query_string[:-35]+u.secret).hexdigest().lower() != request.params.get('s'):
log.debug('bad signature')
return ''
else:
log.debug('good signature')
p = Ping()
for (arg, att) in params.items():
# must filter!!!
val = request.params.get(arg,'')
log.debug("setting %s to %s"% (att, val))
if val:
setattr(p, att, val)
p.username = u.username
#p.begin()
p.save()
p.commit()
if request.params.get('v',False) == '2':
#version 2 response.
response.headers['content-type'] = 'text/javascript'
return """YUI().use('node', function(Y) {Y.one('#context-num-pool-71917374__at__N00').insert(document.createTextNode(' (Flagged) '), 'before')})"""
else:
#version 1 response
""" q='uid='+uid+'&id='+p.id+'&own='+p.ownerNsid+'&sec='+p.secret+'&con='+nextprev_currentContextID;
i.src='http://192.168.10.99:5000/p?'+q+'s='+md5_calcMD5(q+s);
"""
response.headers['content-type'] = 'text/javascript'
return """Y.D.get('contextTitle_pool71917374@N00').appendChild(document.createTextNode('(Flagged)'))"""
def dup_scan(self):
log.debug('dup ping')
fapi = FlickrAPI(config['api_key'], config['api_secret'], token=config['api_token'])
try:
rsp = fapi.groups_pools_getPhotos(api_key=config['api_key'],
group_id=config['group_id'],
extras='last_update',
per_page='50',
page='1',
token=config['api_token'])
except Exception,msg:
log.debug(msg.args)
return False
photos = rsp.find('photos')
for photo in photos.getchildren():
image = photo.get('id')
dt = int(photo.get('dateadded'))
if ImageHistory.get(image=image, dt=dt):
log.debug('found high water mark, quitting')
break
if ImageHistory.get_all(image=image):
log.debug('found a re-add')
p = Ping()
p.image = image
p.owner = photo.get('owner')
p.reason = "Bump"
p.username = 'RoboMod'
p.save()
Ping.commit()
ih = ImageHistory()
ih.image = image
ih.dt = dt
ih.save()
ImageHistory.commit()
return "successful"
|
SimpleTax/python-simpletax
|
simpletax/__init__.py
|
Python
|
bsd-3-clause
| 52
| 0.038462
|
from api import ServerError, NoAccessError, SimpleTax
|
IdeaSolutionsOnline/ERP4R
|
core/objs/sr_crianca.py
|
Python
|
mit
| 3,036
| 0.044069
|
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'sr_crianca.SRCrianca'
import auth, base_models
from orm import *
from form import *
class SRCrianca(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'sr_crianca'
self.__title__ ='Inscrição e Identificação da Criança'
self.__model_name__ = __model_name__
        self.__list_edit_mode__ = 'edit'
self.__get_options__ = ['nome'] # define tambem o campo a ser mostrado no m2m, independentemente da descricao no field do m2m
self.__order_by__ = 'sr_crianca.nome'
self.__tabs__ = [
('Pré-Natal', ['sr_pre_natal']),
('Neo-Natal', ['sr_neo_natal']),
('Irmãos', ['sr_crianca']),
]
#choice field com a estrutura de saude
self.numero_inscricao = integer_field(view_order = 1, name = 'Nº de Inscrição', size = 40)
self.primeira_consulta = date_field(view_order = 2, name = 'Primeira Consulta', size=40, args = 'required', default = datetime.date.today(), onlist = False)
self.nome = string_field(view_order = 3, name = 'Nome', size = 70, onlist = True)
self.sexo = combo_field(view_order = 4, name = 'Sexo', size = 40, default = 'Feminino', options = [('feminino','Feminino'), ('masculino','Masculino')], onlist = True)
self.data_nascimento = date_field(view_order = 5, name = 'Data Nascimento', size=40, args = 'required', onlist = True)
self.hora_nascimento = time_field(view_order=7, name ='Hora Nascimento', size=40, onlist=False, args='required')
self.numero_registo = string_field(view_order = 8, name = 'Nº Registo', size = 40, onlist = False)
self.data_registo = date_field(view_order = 9, name = 'Data Registo', size=40, args = 'required')
self.nome_pai = string_field(view_order = 10, name = 'Nome do Pai', size = 60, onlist=False)
self.nome_mae = string_field(view_order = 11, name = 'Nome do Mãe', size = 60)
self.endereco_familia = text_field(view_order=12, name='Endereço Familia', size=70, args="rows=30", onlist=False, search=False)
self.telefone = string_field(view_order = 13, name = 'Telefone', size = 40, onlist = True)
self.estado = combo_field(view_order = 14, name = 'Estado', size = 40, default = 'active', options = [('active','Activo'), ('canceled','Cancelado')], onlist = True)
self.sr_pre_natal = list_field(view_order=15, name = 'Informações Pré-Natal', fields=['duracao_gravidez'], condition="crianca='{id}'", model_name='sr_pre_natal.SRPreNatal', list_edit_mode='inline', onlist = False)
self.sr_neo_natal = list_field(view_order=16, name = 'Informações Neo-Natal', column='local_parto', condition="sr_crianca='{id}'", model_name='sr_neo_natal.SRNeoNatal', list_edit_mode='inline', onlist = False)
|
arokem/PyEMMA
|
pyemma/coordinates/tests/test_numpyfilereader.py
|
Python
|
bsd-2-clause
| 5,975
| 0.002343
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on 07.04.2015
@author: marscher
'''
import unittest
import tempfile
import numpy as np
from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
from pyemma.util.log import getLogger
import shutil
class TestNumPyFileReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.logger = getLogger(cls.__class__.__name__)
d = np.arange(3 * 100).reshape((100, 3))
d2 = np.arange(300, 900).reshape((200,3))
d_1d = np.random.random(100)
cls.dir = tempfile.mkdtemp(prefix='pyemma_npyreader')
cls.f1 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
cls.f2 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
cls.f3 = tempfile.mktemp(suffix='.npz', dir=cls.dir)
cls.f4 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
# 2d
np.save(cls.f1, d)
np.save(cls.f4, d2)
# 1d
np.save(cls.f2, d_1d)
np.savez(cls.f3, d, d)
cls.files2d = [cls.f1, cls.f4] #cls.f3]
cls.files1d = [cls.f2]
cls.d = d
cls.d_1d = d_1d
cls.npy_files = [f for f in cls.files2d if f.endswith('.npy')]
cls.npz = cls.f3
return cls
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.dir, ignore_errors=True)
def test_only_npy(self):
reader = NumPyFileReader(self.npy_files)
from_files = [np.load(f) for f in self.npy_files]
concatenated = np.vstack(from_files)
output = reader.get_output()
self.assertEqual(reader.number_of_trajectories(), len(self.npy_files))
self.assertEqual(reader.n_frames_total(), concatenated.shape[0])
for x, y in zip(output, from_files):
np.testing.assert_array_almost_equal(x, y)
def test_small_chunks(self):
reader = NumPyFileReader(self.npy_files)
reader.chunksize = 30
from_files = [np.load(f) for f in self.npy_files]
concatenated = np.vstack(from_files)
output = reader.get_output()
self.assertEqual(reader.number_of_trajectories(), len(self.npy_files))
self.assertEqual(reader.n_frames_total(), concatenated.shape[0])
for x, y in zip(output, from_files):
np.testing.assert_array_almost_equal(x, y)
def testSingleFile(self):
reader = NumPyFileReader(self.npy_files[0])
self.assertEqual(reader.n_frames_total(), self.d.shape[0])
@unittest.skip("npz currently unsupported")
def test_npz(self):
reader = NumPyFileReader(self.npz)
        all_data = reader.get_output()
fh = np.load(self.npz)
data = [x[1] for x in fh.items()]
fh.close()
self.assertEqual(reader.number_of_trajectories(), len(data))
for outp, inp in zip(all_data, data):
np.testing.assert_equal(outp, inp)
def test_stridden_access(self):
reader = NumPyFileReader(self.f1)
reader.chunksize = 10
wanted = np.load(self.f1)
for stride in [2, 3, 5, 7, 15]:
            first_traj = reader.get_output(stride=stride)[0]
np.testing.assert_equal(first_traj, wanted[::stride],
"did not match for stride %i" % stride)
def test_lagged_stridden_access(self):
reader = NumPyFileReader(self.f1)
strides = [2, 3, 5, 7, 15]
lags = [1, 3, 7, 10, 30]
for stride in strides:
for lag in lags:
chunks = []
for _, _, Y in reader.iterator(stride, lag):
chunks.append(Y)
chunks = np.vstack(chunks)
np.testing.assert_equal(chunks, self.d[lag::stride])
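        # Illustrative note (not in the original test): with stride=2 and
        # lag=3, self.d[lag::stride] selects rows 3, 5, 7, ..., which is the
        # lagged series the iterator's Y chunks are expected to reproduce.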
def test_lagged_stridden_access_multiple_files(self):
reader = NumPyFileReader(self.files2d)
print reader.trajectory_lengths()
strides = [2, 3, 5, 7, 15]
lags = [1, 3, 7, 10, 30]
for stride in strides:
for lag in lags:
chunks = {i: [] for i in xrange(reader.number_of_trajectories())}
for itraj, _, Y in reader.iterator(stride, lag):
chunks[itraj].append(Y)
for i, k in enumerate(chunks.itervalues()):
stack = np.vstack(k)
d = np.load(self.files2d[i])
np.testing.assert_equal(stack, d[lag::stride],
"not equal for stride=%i"
" and lag=%i" % (stride, lag))
if __name__ == "__main__":
unittest.main()
|
DomDomPow/snapventure
|
snapventure-backend/snapventure/migrations/0005_auto_20161103_0856.py
|
Python
|
gpl-3.0
| 1,892
| 0.003171
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-03 08:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('snapventure', '0004_auto_20161102_2043'),
]
operations = [
migrations.CreateModel(
name='Inscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
                ('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Journey')),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, max_length=500)),
('location', models.CharField(blank=True, max_length=30)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='inscription',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Profile'),
),
migrations.AddField(
model_name='journey',
name='inscriptions',
field=models.ManyToManyField(through='snapventure.Inscription', to='snapventure.Profile'),
),
]
|
stonekyx/binary
|
vendor/scons-local-2.3.4/SCons/Scanner/Dir.py
|
Python
|
gpl-3.0
| 3,751
| 0.002399
|
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 2014/09/27 12:51:43 garyo"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return list(filter(is_Dir, nodes))
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-me
|
mory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
MSEMJEJME/ReAlistair
|
renpy/lint.py
|
Python
|
gpl-2.0
| 13,359
| 0.007411
|
# Copyright 2004-2010 PyTom <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy
import codecs
import os
import os.path
import time
image_prefixes = None
filenames = None
# Things to check in lint.
#
# Image files exist, and are of the right case.
# Jump/Call targets defined.
# Say whos can evaluate.
# Call followed by say.
# Show/Scene valid.
# At valid.
# With valid.
# Hide maybe valid.
# Expressions can compile.
# The node the report will be about:
report_node = None
# Reports a message to the user.
def report(msg, *args):
if report_node:
out = u"%s:%d " % (renpy.parser.unicode_filename(report_node.filename), report_node.linenumber)
else:
out = ""
out += msg % args
print
print out.encode('utf-8')
added = { }
# Reports additional information about a message, the first time it
# occurs.
def add(msg):
if not msg in added:
added[msg] = True
print unicode(msg).encode('utf-8')
# Tries to evaluate an expression, announcing an error if it fails.
def try_eval(where, expr, additional=None):
try:
renpy.python.py_eval(expr)
except:
report( "Could not ev
|
aluate '%s', in %s.", expr, where)
if additional:
add(additional)
# Returns True if the expression can be compiled as Python, False
# otherwise.
def try_compile(where, expr):
try:
        renpy.python.py_compile_eval_bytecode(expr)
except:
report("'%s' could not be compiled as a python expression, %s.", expr, where)
# This reports an error if we're sure that the image with the given name
# does not exist.
def image_exists(name, expression, tag):
# Add the tag to the set of known tags.
tag = tag or name[0]
image_prefixes[tag] = True
if expression:
return
name = list(name)
names = " ".join(name)
while name:
if tuple(name) in renpy.exports.images:
return
name.pop()
report("The image named '%s' was not declared.", names)
# Only check each file once.
check_file_cache = { }
def check_file(what, fn):
present = check_file_cache.get(fn, None)
if present is True:
return
if present is False:
report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
return
if not renpy.loader.loadable(fn):
report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
check_file_cache[fn] = False
return
check_file_cache[fn] = True
try:
renpy.loader.transfn(fn)
except:
return
if renpy.loader.transfn(fn) and \
fn.lower() in filenames and \
fn != filenames[fn.lower()]:
report("Filename case mismatch for %s. '%s' was used in the script, but '%s' was found on disk.", what, fn, filenames[fn.lower()])
add("Case mismatches can lead to problems on Mac, Linux/Unix, and when archiving images. To fix them, either rename the file on disk, or the filename use in the script.")
def check_displayable(what, d):
files = [ ]
def files_callback(img):
files.extend(img.predict_files())
d.predict(files_callback)
for fn in files:
check_file(what, fn)
# Lints ast.Image nodes.
def check_image(node):
name = " ".join(node.imgname)
check_displayable('image %s' % name, renpy.exports.images[node.imgname])
def imspec(t):
if len(t) == 3:
return t[0], None, None, t[1], t[2], 0
if len(t) == 6:
return t[0], t[1], t[2], t[3], t[4], t[5], None
else:
return t
# Lints ast.Show and ast.Scene nodes.
def check_show(node):
# A Scene may have an empty imspec.
if not node.imspec:
return
name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)
if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
report("Uses layer '%s', which is not in config.layers.", layer)
image_exists(name, expression, tag)
for i in at_list:
try_eval("the at list of a scene or show statment", i, "Perhaps you forgot to declare, or misspelled, a position?")
# Lints ast.Hide.
def check_hide(node):
name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)
tag = tag or name[0]
if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
report("Uses layer '%s', which is not in config.layers.", layer)
if tag not in image_prefixes:
report("The image tag '%s' is not the prefix of a declared image, nor was it used in a show statement before this hide statement.", tag)
# for i in at_list:
    # try_eval(node, "at list of hide statement", i)
def check_with(node):
try_eval("a with statement or clause", node.expr, "Perhaps you forgot to declare, or misspelled, a transition?")
def check_user(node):
def error(msg):
report("%s", msg)
renpy.exports.push_error_handler(error)
try:
node.call("lint")
finally:
renpy.exports.pop_error_handler()
try:
node.get_next()
except:
report("Didn't properly report what the next statement should be.")
check_text_tags = renpy.display.text.check_text_tags
def text_checks(s):
msg = renpy.display.text.check_text_tags(s)
if msg:
report("%s (in %s)", msg, repr(s)[1:])
if "%" in s:
state = 0
pos = 0
fmt = ""
while pos < len(s):
c = s[pos]
pos += 1
# Not in a format.
if state == 0:
if c == "%":
state = 1
fmt = "%"
# In a format.
elif state == 1:
fmt += c
if c == "(":
state = 2
elif c in "#0123456780- +hlL":
state = 1
elif c in "diouxXeEfFgGcrs%":
state = 0
else:
report("Unknown string format code '%s' (in %s)", fmt, repr(s)[1:])
state = 0
# In a mapping key.
elif state == 2:
fmt += c
if c == ")":
state = 1
if state != 0:
report("Unterminated string format code '%s' (in %s)", fmt, repr(s)[1:])
def check_say(node):
if node.who:
try_eval("the who part of a say statement", node.who, "Perhaps you forgot to declare a character?")
if node.with_:
try_eval("the with clause of a say statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")
text_checks(node.what)
def check_menu(node):
if node.with_:
try_eval("the with clause
|
tseaver/google-cloud-python
|
datastore/google/cloud/datastore/batch.py
|
Python
|
apache-2.0
| 12,054
| 0.000166
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes.
Batches provide the ability to execute multiple operations
in a single request to the Cloud Datastore API.
See
https://cloud.google.com/datastore/docs/concepts/entities#batch_operations
"""
from google.cloud.datastore import helpers
from google.cloud.datastore_v1.proto import datastore_pb2 as _datastore_pb2
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
Used to build up a bulk mutation.
For example, the following snippet of code will put the two ``save``
operations and the ``delete`` operation into the same mutation, and send
them to the server in a single API request::
>>> from google.cloud import datastore
>>> client = datastore.Client()
>>> batch = client.batch()
>>> batch.begin()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case
:meth:`commit` will be called automatically if its block exits without
raising an exception::
>>> with batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> with batch:
... do_some_work(batch)
... raise Exception() # rolls back
:type client: :class:`google.cloud.datastore.client.Client`
:param client: The client used to connect to datastore.
"""
_id = None # "protected" attribute, always None for non-transactions
_INITIAL = 0
"""Enum value for _INITIAL status of batch/transaction."""
_IN_PROGRESS = 1
"""Enum value for _IN_PROGRESS status of batch/transaction."""
_ABORTED = 2
"""Enum value for _ABORTED status of batch/transaction."""
_FINISHED = 3
"""Enum value for _FINISHED status of batch/transaction."""
def __init__(self, client):
self._client = client
self._mutations = []
self._partial_key_entities = []
self._status = self._INITIAL
def current(self):
"""Return the topmost batch / transaction, or None."""
return self._client.current_batch
@property
def project(self):
"""Getter for project in which the batch will run.
:rtype: :class:`str`
:returns: The project in which the batch will run.
"""
return self._client.project
@property
def namespace(self):
"""Getter for namespace in which the batch will run.
:rtype: :class:`str`
:returns: The namespace in which the batch will run.
"""
return self._client.namespace
def _add_partial_key_entity_pb(self):
"""Adds a new mutation for an entity with a partial key.
:rtype: :class:`.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
                  updated and sent with a commit.
"""
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.insert
def _add_complete_key_entity_pb(self):
"""Adds a new mutation for an entity with a completed key.
:rtype: :class:`.entity_pb2.Entity`
        :returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
# We use ``upsert`` for entities with completed keys, rather than
# ``insert`` or ``update``, in order not to create race conditions
# based on prior existence / removal of the entity.
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.upsert
def _add_delete_key_pb(self):
"""Adds a new mutation for a key to be deleted.
:rtype: :class:`.entity_pb2.Key`
:returns: The newly created key protobuf that will be
deleted when sent with a commit.
"""
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.delete
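    # Illustrative note (not part of the original file): each _add_* helper
    # appends a fresh Mutation to self._mutations and hands back the field to
    # fill in, so put() with a complete key ultimately populates
    # Mutation.upsert, while delete() populates Mutation.delete.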
@property
def mutations(self):
"""Getter for the changes accumulated by this batch.
Every batch is committed with a single commit request containing all
the work to be done as mutations. Inside a batch, calling :meth:`put`
with an entity, or :meth:`delete` with a key, builds up the request by
adding a new mutation. This getter returns the protobuf that has been
built-up so far.
:rtype: iterable
:returns: The list of :class:`.datastore_pb2.Mutation`
protobufs to be sent in the commit request.
"""
return self._mutations
def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to put()")
if entity.key is None:
raise ValueError("Entity must have a key")
if self.project != entity.key.project:
raise ValueError("Key must be from same project as batch")
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity)
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity)
def delete(self, key):
"""Remember a key to be deleted during :meth:`commit`.
:type key: :class:`google.cloud.datastore.key.Key`
:param key: the key to be deleted.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if key is not complete, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to delete()")
if key.is_partial:
raise ValueError("Key must be complete")
if self.project != key.project:
raise ValueError("Key must be from same project as batch")
key_pb = key.to_protobuf()
self._add_delete_key_pb().CopyFrom(key_pb)
def begin(self):
"""Begins a batch.
This method is called automatically when entering a with
statement, however it can be called explicitly if you don't want
to use a context manager.
|
Zephyrrus/ubb
|
YEAR 1/SEM1/FP/LAB/l6-l9/Repository/SQLLoader.py
|
Python
|
mit
| 2,336
| 0.003853
|
# NEVER DO THIS IN SQL!
from Repository.Loader import Loader, LoaderException
from Domain import Grade, Student, Discipline
import sqlite3
class SQLLoader(Loader):
def __init__(self, repo):
self.repo = repo
self.conn = sqlite3.connect(self.repo.getStoragePath() + ".sqlite")
        self.cursor = self.conn.cursor()
def save(self):
# serializable = {'students': [], 'disciplines': [], 'grades': []}
        self.cursor.execute('''DROP TABLE IF EXISTS students;''')
self.cursor.execute('''DROP TABLE IF EXISTS disciplines;''')
self.cursor.execute('''DROP TABLE IF EXISTS grades;''')
# eww
self.cursor.execute('''CREATE TABLE students (id int, name text)''')
self.cursor.execute('''CREATE TABLE disciplines (id int, name text)''')
self.cursor.execute('''CREATE TABLE grades (did int, sid int, grade int)''')
serializable = {
'students': [(student.getId(), student.getName()) for student in self.repo.getStudents()],
'disciplines': [(discipline.getId(), discipline.getName()) for discipline in
self.repo.getDisciplines()],
'grades': [(grade.getDisciplineId(), grade.getStudentId(), grade.getGrade()) for grade in
self.repo.getGrades()]}
self.cursor.executemany('INSERT INTO students VALUES (?,?)', serializable['students'])
self.cursor.executemany('INSERT INTO disciplines VALUES (?,?)', serializable['disciplines'])
self.cursor.executemany('INSERT INTO grades VALUES (?,?,?)', serializable['grades'])
self.conn.commit()
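        # Illustrative note (not part of the original file): executemany()
        # binds each tuple against the '?' placeholders, so a repo holding a
        # hypothetical Student(1, 'Ana') issues
        # INSERT INTO students VALUES (1, 'Ana').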
def load(self):
try:
self.repo._createNewRepo()
for row in self.cursor.execute('SELECT * FROM students'):
self.repo.addStudent(Student.Student(row[0], row[1]), False)
for row in self.cursor.execute('SELECT * FROM disciplines'):
self.repo.addDiscipline(Discipline.Discipline(row[0], row[1]), False)
for row in self.cursor.execute('SELECT * FROM grades'):
self.repo.addGrade(Grade.Grade(row[0], row[1], row[2]), False)
return True
except Exception as ex:
print('[StudentRepository]', ex)
return False
# eval studentCatalogController._repo._converter(0)
|
gramps-project/gramps
|
gramps/gen/utils/symbols.py
|
Python
|
gpl-2.0
| 7,831
| 0.001022
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2015- Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# https://en.wikipedia.org/wiki/Miscellaneous_Symbols
# http://www.w3schools.com/charsets/ref_utf_symbols.asp
#
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.config import config
_ = glocale.translation.sgettext
# pylint: disable=superfluous-parens
# pylint: disable=anomalous-unicode-escape-in-string
class Symbols(object):
# genealogical symbols
SYMBOL_FEMALE = 0
SYMBOL_MALE = 1
SYMBOL_ASEXUAL_SEXLESS = 2 # Unknown
SYMBOL_LESBIAN = 3
SYMBOL_MALE_HOMOSEXUAL = 4
SYMBOL_HETEROSEXUAL = 5
SYMBOL_HERMAPHRODITE = 6
SYMBOL_TRANSGENDER = 7
SYMBOL_NEUTER = 8
SYMBOL_ILLEGITIM = 9
SYMBOL_BIRTH = 10
SYMBOL_BAPTISM = 11 # CHRISTENING
SYMBOL_ENGAGED = 12
SYMBOL_MARRIAGE = 13
SYMBOL_DIVORCE = 14
SYMBOL_UNMARRIED_PARTNERSHIP = 15
SYMBOL_BURIED = 16
SYMBOL_CREMATED = 17 # Funeral urn
SYMBOL_KILLED_IN_ACTION = 18
SYMBOL_EXTINCT = 19
# genealogical death symbols
DEATH_SYMBOL_NONE = 0
DEATH_SYMBOL_X = 1
DEATH_SYMBOL_SKULL = 2
DEATH_SYMBOL_ANKH = 3
DEATH_SYMBOL_ORTHODOX_CROSS = 4
DEATH_SYMBOL_CHI_RHO = 5
DEATH_SYMBOL_LORRAINE_CROSS = 6
DEATH_SYMBOL_JERUSALEM_CROSS = 7
DEATH_SYMBOL_STAR_CRESCENT = 8
DEATH_SYMBOL_WEST_SYRIAC_CROSS = 9
DEATH_SYMBOL_EAST_SYRIAC_CROSS = 10
DEATH_SYMBOL_HEAVY_GREEK_CROSS = 11
DEATH_SYMBOL_LATIN_CROSS = 12
DEATH_SYMBOL_SHADOWED_LATIN_CROSS = 13
DEATH_SYMBOL_MALTESE_CROSS = 14
DEATH_SYMBOL_STAR_OF_DAVID = 15
DEATH_SYMBOL_DEAD = 16
def __init__(self):
self.symbols = None
self.all_symbols = [
# Name UNICODE SUBSTITUTION
(_("Female"), '\u2640', ""),
(_("Male"), '\u2642', ""),
(_("Asexuality, sexless, genderless"), '\u26aa', ""),
(_("Lesbianism"), '\u26a2', ""),
(_("Male homosexuality"), '\u26a3', ""),
(_("Heterosexuality"), '\u26a4', ""),
(_("Transgender, hermaphrodite (in entomology)"), '\u26a5', ""),
(_("Transgender"), '\u26a6', ""),
(_("Neuter"), '\u26b2', ""),
(_("Illegitimate"), '\u229b', ""),
(_("Birth"), '\u002a', config.get('utf8.birth-symbol')),
(_("Baptism/Christening"), '\u007e',
config.get('utf8.baptism-symbol')),
(_("Engaged"), '\u26ac', config.get('utf8.engaged-symbol')),
(_("Marriage"), '\u26ad', config.get('utf8.marriage-symbol')),
(_("Divorce"), '\u26ae', config.get('utf8.divorce-symbol')),
(_("Unmarried partnership"), '\u26af',
config.get('utf8.partner-symbol')),
(_("Buried"), '\u26b0', config.get('utf8.buried-symbol')),
(_("Cremated/Funeral urn"), '\u26b1',
config.get('utf8.cremated-symbol')),
(_("Killed in action"), '\u2694', config.get('utf8.killed-symbol')),
(_("Extinct"), '\u2021', "")]
# The following is used in the global preferences in the display tab.
# Name UNICODE SUBSTITUTION
self.death_symbols = [(_("Nothing"), "", ""),
("x", "x", "x"),
(_("Skull and crossbones"), "\u2620",
config.get('utf8.dead-symbol')),
(_("Ankh"), "\u2625",
config.get('utf8.dead-symbol')),
(_("Orthodox cross"), "\u2626",
config.get('utf8.dead-symbol')),
(_("Chi rho"), "\u2627",
config.get('utf8.dead-symbol')),
(_("Cross of Lorraine"), "\u2628",
config.get('utf8.dead-symbol')),
(_("Cross of Jerusalem"), "\u2629",
config.get('utf8.dead-symbol')),
(_("Star and crescent"), "\u262a",
config.get('utf8.dead-symbol')),
(_("West Syriac cross"), "\u2670",
                               config.get('utf8.dead-symbol')),
(_("East Syriac cross"), "\u2671",
config.get('utf8.dead-symbol')),
(_("Heavy Greek cross"), "\u271a",
config.get('utf8.dead-symbol')),
(_("Latin cross"), "\u271d",
config.get('utf8.dead-symbol')),
(_("Shadowed White Latin cross"), "\u271e",
config.get('utf8.dead-symbol')),
(_("Maltese cross"), "\u2720",
config.get('utf8.dead-symbol')),
(_("Star of David"), "\u2721",
config.get('utf8.dead-symbol')),
                              (_("Dead"), _("Dead"), _("Dead"))]
#
# functions for general symbols
#
def get_symbol_for_html(self, symbol):
""" return the html string like '⚪' """
return '&#%d;' % ord(self.all_symbols[symbol][1])
def get_symbol_name(self, symbol):
"""
Return the name of the symbol.
"""
return self.all_symbols[symbol][0]
def get_symbol_for_string(self, symbol):
""" return the utf-8 character like '\u2670' """
return self.all_symbols[symbol][1]
def get_symbol_fallback(self, symbol):
"""
Return the replacement string.
This is used if the utf-8 symbol in not present within a font.
"""
return self.all_symbols[symbol][2]
#
# functions for death symbols
#
def get_death_symbols(self):
"""
Return the list of death symbols.
This is used in the global preference to choose which symbol we'll use.
"""
return self.death_symbols
def get_death_symbol_name(self, symbol):
"""
Return the name of the symbol.
"""
return self.death_symbols[symbol][0]
def get_death_symbol_for_html(self, symbol):
"""
return the html string like '⚪'.
"""
return '&#%d;' % ord(self.death_symbols[symbol][1])
def get_death_symbol_for_char(self, symbol):
"""
Return the utf-8 character for the symbol.
"""
return self.death_symbols[symbol][1]
def get_death_symbol_fallback(self, symbol):
"""
Return the string replacement for the symbol.
"""
return self.death_symbols[symbol][2]
#
# functions for all symbols
#
def get_how_many_symbols(self):
return len(self.death_symbols) + len(self.all_symbols) - 4
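# Minimal usage sketch (editorial addition, not part of the original module):
#   symbols = Symbols()
#   symbols.get_symbol_for_html(Symbols.SYMBOL_FEMALE)            # '&#9792;' for '\u2640'
#   symbols.get_death_symbol_for_char(Symbols.DEATH_SYMBOL_ANKH)  # '\u2625'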
|
juddc/Dipper
|
dip/tests/test_interpreter.py
|
Python
|
mit
| 5,801
| 0.001896
|
import sys
sys.path.insert(0, "../")
import unittest
from dip.typesystem import DNull, DBool, DInteger, DString, DList
from dip.compiler import BytecodeCompiler
from dip.interpreter import VirtualMachine
from dip.namespace import Namespace
class TestInterpreter(unittest.TestCase):
def _execute_simple(self, code, data):
result = [None]
def getresult(val):
result[0] = val
vm = VirtualMachine([], getresult)
        globalns = Namespace("globals")
ctx = BytecodeCompiler("main", code, data, namespace=globalns)
globalns.set_func("main", ctx.mkfunc())
vm.setglobals(globalns)
vm.run(pass_argv=False)
return result[0]
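    # Editorial note (inferred from the tests below, not part of the original
    # file): instructions use a three-address form "OP a b c" whose integer
    # operands index into the data list, e.g. "ADD 0 1 2" computes
    # data[2] = data[0] + data[1], and "RET n" returns data[n].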
def test_add(self):
result = self._execute_simple("""
ADD 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(32), # data0
DInteger.new_int(64), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 96)
def test_sub(self):
result = self._execute_simple("""
SUB 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(64), # data0
DInteger.new_int(32), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 32)
def test_mul(self):
result = self._execute_simple("""
MUL 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(64), # data0
DInteger.new_int(32), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 2048)
def test_div(self):
result = self._execute_simple("""
DIV 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(64), # data0
DInteger.new_int(2), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 32)
def test_jump(self):
result = self._execute_simple("""
JMP 2 # 0
RET 0 # 1
RET 1 # 2
""", [
DInteger.new_int(16), # data0
DInteger.new_int(32), # data1
])
self.assertEqual(result.int_py(), 32)
def test_len(self):
result = self._execute_simple("""
LEN 0 1 # 0
RET 1 # 1
""", [
DString.new_str("neat"), # data0
DInteger(), # data1
])
self.assertEqual(result.int_py(), 4)
def test_eq(self):
result = self._execute_simple("""
EQ 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(4), # data0
DInteger.new_int(5), # data1
DBool(), # data2
])
self.assertEqual(result.int_py(), False)
result = self._execute_simple("""
EQ 0 1 2 # 0
RET 2 # 1
""", [
DString.new_str("neat"), # data0
DString.new_str("neat"), # data1
DBool(), # data2
])
self.assertEqual(result.int_py(), True)
def test_branch(self):
result = self._execute_simple("""
EQ 0 1 2 # 0
BF 2 3 # 1
RET 0 # 2
LABEL :some_label # 3
RET 3 # 4
""", [
DInteger.new_int(4), # data0
DInteger.new_int(5), # data1
DBool(), # data2
DInteger.new_int(999), # data3
])
self.assertEqual(result.int_py(), 999)
def test_lists(self):
result = self._execute_simple("""
LIST_NEW 0
LIST_ADD 0 1 # 0 data0.append(data1)
LIST_ADD 0 1 # 1 data0.append(data1)
LIST_ADD 0 2 # 2 data0.append(data2)
LEN 0 3 # 3 data3 = len(data0)
EQ 3 5 6 # 4 data6 = (data3 == data5)
LIST_REM 0 4 # 5 data0.remove(data4 (represents an index))
LEN 0 3 # 6 data3 = len(data0)
NEQ 3 5 7 # 7 data7 = (data3 != data5)
EQ 6 7 8 # 8 data8 = (data6 == data7)
RET 8 # 9 return data8
""", [
DList(), # data0, list
DInteger.new_int(5), # data1, fake value to add to the list
DString.new_str("hi"), # data2, fake value to add to the list
DInteger(), # data3, list length
DInteger.new_int(2), # data4, list index
DInteger.new_int(3), # data5, expected list length
DBool(), # data6, comp1
DBool(), # data7, comp2
DBool(), # data8, output
])
self.assertEqual(result.int_py(), True)
if __name__ == '__main__':
unittest.main()
|
jean/sentry
|
src/sentry/web/frontend/project_settings.py
|
Python
|
bsd-3-clause
| 12,517
| 0.001997
|
from __future__ import absolute_import
import re
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from uuid import uuid1
from sentry import options
from sentry.models import AuditLogEntryEvent, Project, Team
from sentry.web.forms.fields import (
CustomTypedChoiceField,
RangeField,
OriginsField,
)
from sentry.web.frontend.base import ProjectView
BLANK_CHOICE = [("", "")]
class EditProjectForm(forms.ModelForm):
name = forms.CharField(
label=_('Project Name'),
max_length=200,
widget=forms.TextInput(attrs={'placeholder': _('Production')})
)
slug = forms.SlugField(
label=_('Short name'),
help_text=_('A unique ID used to identify this project.'),
)
team = CustomTypedChoiceField(choices=(), coerce=int, required=False)
origins = OriginsField(
label=_('Allowed Domains'),
required=False,
help_text=_('Separate multiple entries with a newline.')
)
token = forms.CharField(
label=_('Security token'),
help_text=_(
'Outbound requests matching Allowed Domains will have the header "{token_header}: {token}" appended.'
),
required=True,
)
token_header = forms.CharField(
label=_('Security token header'),
help_text=_(
'Outbound requests matching Allowed Domains will have the header "{token_header}: {token}" appended.'
),
widget=forms.TextInput(attrs={
'placeholder': _('X-Sentry-Token'),
}),
required=False,
)
verify_ssl = forms.BooleanField(
label=_('Verify TLS/SSL'),
help_text=_('Outbound requests will verify TLS (sometimes known as SSL) connections.'),
required=False,
)
resolve_age = RangeField(
label=_('Auto resolve'),
required=False,
min_value=0,
max_value=720,
step_value=1,
help_text=_(
            'Automatically resolve an issue if it hasn\'t been seen for this amount of time.'
)
)
    scrub_data = forms.BooleanField(
label=_('Data Scrubber'), help_text=_('Enable server-side data scrubbing.'), required=False
)
scrub_defaults = forms.BooleanField(
label=_('Use Default Scrubbers'),
help_text=_(
'Apply default scrubbers to prevent things like passwords and credit cards from being stored.'
),
required=False
)
sensitive_fields = forms.CharField(
label=_('Additional sensitive fields'),
help_text=_(
'Additional field names to match against when scrubbing data. Separate multiple entries with a newline.'
),
widget=forms.Textarea(
attrs={
'placeholder': mark_safe(_('e.g. email')),
'class': 'span8',
'rows': '3',
}
),
required=False,
)
safe_fields = forms.CharField(
label=_('Safe fields'),
help_text=_(
'Field names which data scrubbers should ignore. '
'Separate multiple entries with a newline.'
),
widget=forms.Textarea(
attrs={
'placeholder': mark_safe(_('e.g. email')),
'class': 'span8',
'rows': '3',
}
),
required=False,
)
scrub_ip_address = forms.BooleanField(
label=_('Don\'t store IP Addresses'),
help_text=_('Prevent IP addresses from being stored for new events.'),
required=False
)
# JavaScript options
scrape_javascript = forms.BooleanField(
label=_('Enable JavaScript source fetching'),
help_text=_('Allow Sentry to scrape missing JavaScript source context when possible.'),
required=False,
)
# Options that are overridden by Organization level settings
org_overrides = ('scrub_data', 'scrub_defaults', 'scrub_ip_address')
default_environment = forms.CharField(
label=_('Default Environment'),
help_text=_('The default selected environment when viewing issues.'),
widget=forms.TextInput(attrs={'placeholder': _('e.g. production')}),
required=False,
)
mail_subject_prefix = forms.CharField(
label=_('Subject Prefix'),
required=False,
help_text=_('Choose a custom prefix for emails from this project.')
)
class Meta:
fields = ('name', 'team', 'slug')
model = Project
def __init__(self, request, organization, team_list, data, instance, *args, **kwargs):
# First, we need to check for the value overrides from the Organization options
# We need to do this before `initial` gets passed into the Form.
disabled = []
if 'initial' in kwargs:
for opt in self.org_overrides:
value = bool(organization.get_option('sentry:require_%s' % (opt, ), False))
if value:
disabled.append(opt)
kwargs['initial'][opt] = value
super(EditProjectForm, self).__init__(data=data, instance=instance, *args, **kwargs)
self.organization = organization
self.team_list = team_list
self.fields['team'].choices = self.get_team_choices(team_list, instance.team)
self.fields['team'].widget.choices = self.fields['team'].choices
# After the Form is initialized, we now need to disable the fields that have been
# overridden from Organization options.
for opt in disabled:
self.fields[opt].widget.attrs['disabled'] = 'disabled'
def get_team_label(self, team):
return '%s (%s)' % (team.name, team.slug)
def get_team_choices(self, team_list, default=None):
sorted_team_list = sorted(team_list, key=lambda x: x.name)
choices = []
for team in sorted_team_list:
# TODO: optimize queries
choices.append((team.id, self.get_team_label(team)))
if default is None:
choices.insert(0, (-1, mark_safe('–' * 8)))
elif default not in sorted_team_list:
choices.insert(0, (default.id, self.get_team_label(default)))
return choices
def clean_sensitive_fields(self):
value = self.cleaned_data.get('sensitive_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_safe_fields(self):
value = self.cleaned_data.get('safe_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_team(self):
value = self.cleaned_data.get('team')
if not value:
return
# TODO: why is this not already an int?
value = int(value)
if value == -1:
return
if self.instance.team and value == self.instance.team.id:
return self.instance.team
for team in self.team_list:
if value == team.id:
return team
raise forms.ValidationError('Unable to find chosen team')
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if not slug:
return
other = Project.objects.filter(
slug=slug, organization=self.organization
).exclude(id=self.instance.id).first()
if other is not None:
raise forms.ValidationError(
'Another project (%s) is already '
'using that slug' % other.name
)
return slug
def clean_token(self):
token = self.cleaned_data.get('token')
if not token:
return
token_re = r'^[-a-zA-Z0-9+/= ]{1,255}$'
if not re.match(token_re, token):
raise forms.ValidationError('Invalid security token, must be: %s' % token_re)
return token
def clean_token_header(self):
token_header = self.cleaned_data.get('token
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py
|
Python
|
apache-2.0
| 1,581
| 0.001265
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ReadTensorboardBlobData
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
from google.cloud import aiplatform_v1
def sample_read_tensorboard_blob_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
    # Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardBlobDataRequest(
time_series="time_series_value",
)
# Make the request
stream = client.read_tensorboard_blob_data(request=request)
# Handle the response
for response in stream:
print(response)
# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
|
jachris/cook
|
cook/cpp.py
|
Python
|
mit
| 18,097
| 0
|
import os
import re
from . import core
@core.rule
def executable(
name, sources=None, include=None, define=None, flags=None, links=None,
compiler=None, warnings_are_errors=False, scan=True, debug=True,
objects=None, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
include = list(include) if include else []
define = dict(define) if define else {}
flags = list(flags) if flags else []
objects = list(objects) if objects else []
linkflags = list(linkflags) if linkflags else []
static = []
shared = []
if links:
for link in links:
if isinstance(link, str):
lib = find_static_library(link)
if lib is None:
raise ValueError('lib could not be found: ' + link)
static.append(lib)
elif getattr(link, 'type') == 'cpp.static_library':
include.extend(link.headers)
static.append(core.resolve(link.output))
elif getattr(link, 'type') == 'cpp.shared_library':
include.extend(link.headers)
if toolchain is GNU:
shared.append(core.resolve(link.output))
else:
shared.append(core.resolve(link.msvc_lib))
else:
raise TypeError('invalid entry in links: "{}"'.format(link))
if toolchain is MSVC:
name += '.exe'
name = core.build(name)
for source in sources:
obj = object(
sources=[source],
include=include,
define=define,
flags=flags,
compiler=compiler,
error_warnings=warnings_are_errors,
scan=scan,
debug=debug
)
objects.append(core.resolve(obj.output))
yield core.publish(
inputs=objects + static + shared,
message='Link {}'.format(name),
outputs=[name],
result={
'type': 'cpp.executable'
},
check=linkflags
)
if toolchain is GNU:
command = [compiler, '-o', name]
command.extend(objects)
command.extend(static)
for s in shared:
command.append(s)
command.append('-Wl,-rpath,' + os.path.dirname(core.absolute(s)))
command.append('-lstdc++')
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
command = [compiler, '/Fe' + name, '/nologo']
command.extend(objects + shared + static)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
@core.rule
def static_library(
name=None, sources=None, include=None, define=None, flags=None,
headers=None, compiler=None, warnings_are_errors=False, scan=True,
debug=True, objects=None, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if headers is None:
headers = []
if sources is None:
sources = []
if objects is None:
objects = []
linkflags = list(linkflags) if linkflags else []
for source in sources:
obj = object(
sources=[source],
compiler=compiler,
scan=scan,
include=include,
define=define,
flags=flags,
error_warnings=warnings_are_errors,
debug=debug
)
objects.append(obj.output)
if name is None:
name = core.intermediate(core.checksum(
sources, compiler, toolchain, include, define, headers))
else:
name = core.build(name)
if toolchain is MSVC:
name += '.lib'
elif toolchain is GNU:
name += '.a'
yield core.publish(
inputs=objects,
message='Static {}'.format(name),
outputs=[name],
result={
'type': 'cpp.static_library',
'headers': core.absolute(core.resolve(headers))
},
check=linkflags
)
if toolchain is GNU:
archiver = core.which('ar')
command = [archiver, 'rs', name]
command.extend(objects)
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
archiver = os.path.join(os.path.dirname(compiler), 'lib.exe')
        command = [archiver, '/OUT:' + name]
command.extend(objects)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
@core.rule
def shared_library(
name, sources, include=None, define=None, flags=None, headers=None,
compiler=None, warnings_are_errors=False, scan=True, msvc_lib=False,
debug=True, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if headers is None:
headers = []
linkflags = list(linkflags) if linkflags else []
if flags is None:
flags = []
if toolchain is GNU:
flags.append('-fPIC')
if define is None:
define = {}
define['DLL_EXPORT'] = 1
objects = []
for source in sources:
obj = object(
sources=[source],
compiler=compiler,
scan=scan,
include=include,
define=define,
flags=flags,
error_warnings=warnings_are_errors,
debug=debug
)
objects.append(obj.output)
if toolchain is MSVC:
lib = name + '.lib'
if msvc_lib:
lib = core.build(lib)
else:
lib = core.intermediate(lib)
name = core.build(name + '.dll')
else:
lib = None
head, tail = os.path.split(name)
name = core.build(os.path.join(head, 'lib' + tail + '.so'))
yield core.publish(
inputs=objects,
message='Shared {}'.format(name),
outputs=[name, lib] if lib else [name],
result={
'type': 'cpp.shared_library',
'msvc_lib': core.absolute(lib),
'headers': core.absolute(core.resolve(headers)),
'output': core.absolute(name)
},
check=linkflags
)
if toolchain is GNU:
command = [compiler, '-shared', '-o', name]
command.extend(objects)
command.append('-Wl,-soname,' + os.path.basename(name))
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
command = [compiler, '/Fe' + name, '/nologo', '/LD']
command.extend(objects)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
base = os.path.splitext(name)[0]
if not msvc_lib:
origin = base + '.lib'
if os.path.isfile(lib):
os.remove(lib)
os.rename(origin, lib)
os.remove(base + '.exp')
else:
raise NotImplementedError
@core.rule
def object(
name=None, sources=None, include=None, define=None, flags=None,
compiler=None, error_warnings=False, scan=True, debug=True, depend=None
):
if isinstance(sources, str):
raise TypeError('sources must not be a string - try to use a list')
if not sources:
raise ValueError('sources must not be empty')
sources = core.resolve(sources)
include = list(include) if include else []
define = dict(define) if define else {}
flags = list(flags) if flags else []
depend = list(depend) if depend else []
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if name is None:
name = core.intermediate(core.checksum(
|
lodow/portia-proxy
|
slybot/slybot/exporter.py
|
Python
|
bsd-3-clause
| 325
| 0.006154
|
from scrapy.contrib.exporter import CsvItemExporter
from scrapy.conf import settings
class SlybotCSVItemExporter(CsvItemExporter):
def __init__(self, *args, **kwargs):
kwargs['fields_to_export'] = settings.getlist('CSV_EXPORT_FIELDS') or None
        super(SlybotCSVItemExporter, self).__init__(*args, **kwargs)
|
ursky/metaWRAP
|
bin/metawrap-scripts/sam_to_fastq.py
|
Python
|
mit
| 173
| 0.052023
|
#!/usr/bin/env python2.7
import sys
for line in open(sys.argv[1]):
    cut = line.split('\t')
    if len(cut) < 11: continue
    print "@" + cut[0]
    print cut[9]
    print "+"
    print cut[10]
|
ctwiz/stardust
|
qa/rpc-tests/mempool_spendcoinbase.py
|
Python
|
mit
| 2,474
| 0.005659
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Stardust Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
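# Worked example (editorial note, not in the original test): with the regtest
# chain at height 200, the coinbase of block 101 matures at height 201
# (101 + 100), so spending it is accepted into the mempool now, while the
# coinbase of block 102 only becomes spendable at height 202.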
from test_framework.test_framework import StardustTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(StardustTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
# Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
|
darylmathison/github-user-queries
|
tests/main/test_service.py
|
Python
|
gpl-3.0
| 7,359
| 0.001495
|
import unittest
from unittest.mock import patch
from app.main.service import GitHubUserService
@patch("app.main.service.github")
class TestGitHubUserService(unittest.TestCase):
def setUp(self):
self.test_user = "test"
self.retrieved_repos_return = [
{
"fork": False,
"name": "test_non_fork",
"pull_url": "http://localhost/non_fork/pulls",
"url": "https://localhost/non_fork",
"full_name": self.test_user + "/test_non_fork",
"html_url": "https://localhost"
},
            {
                "fork": True,
                "name": "test_fork",
                "full_name": self.test_user + "/test_fork",
                "url": "https://localhost/child",
                "html_url": "https://localhost",
                "parent": {
                    "fork": False,
                    "name": "parent",
                    "url": "http://parent",
                    "full_name": self.test_user + "1/test_parent",
"pull_url": "https://localhost/parent/pulls",
"html_url": "https://localhost/parent"
}
}
]
def test_search_for_users_error(self, github_client):
message = "too many"
github_client.search_for_user.return_value = {"error": message}
assert GitHubUserService.search_for_user("nobody") == message
def test_search_for_users_success(self, github_client):
github_client_return = [{
"avatar_url": "test",
"repos_url": "http://localhost",
"html_url": "https://localhost",
"login": "nobody"
}]
github_client.search_for_user.return_value = github_client_return
found_users = GitHubUserService.search_for_users("nobody")
self.assertEqual(found_users[0].avatar_url, github_client_return[0]["avatar_url"])
self.assertEqual(found_users[0].repos_url, github_client_return[0]["repos_url"])
self.assertEqual(found_users[0].url, github_client_return[0]["html_url"])
self.assertEqual(found_users[0].login, github_client_return[0]["login"])
def test_retrieve_repos_if_fork_with_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
pulls = [
{
"html_url": "https://localhost/parent/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
if "parent" in url:
return pulls
else:
pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
return pulls
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
self.assertEqual(2, len(actual_repos))
for repo in actual_repos:
if repo.is_fork:
self.assertTrue("parent" in
repo.pull_requests[0].url,
"The parent pulls are not in the repo: {}"
.format(repo.name))
def test_retrieve_repos_if_fork_without_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
pulls = [
{
"html_url": "https://localhost/parent/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
if "parent" in url:
return []
else:
pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
return pulls
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
for repo in actual_repos:
if repo.is_fork:
self.assertIsNone(repo.pull_requests,
"The parent pulls are not in the repo: {}"
.format(repo.name))
def test_retrieve_repos_if_source_with_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
pulls = [
{
"html_url": "https://localhost/non_fork/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
return pulls
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
self.assertEqual(2, len(actual_repos))
for repo in actual_repos:
if not repo.is_fork:
self.assertTrue("non_fork" in
repo.pull_requests[0].url,
"The non_fork pulls are not in the repo: {}"
.format(repo.name))
def test_retrieve_repos_if_source_without_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
return []
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
self.assertEqual(2, len(actual_repos))
for repo in actual_repos:
if not repo.is_fork:
self.assertIsNone(repo.pull_requests,
"The non_fork pulls are not in the repo: {}"
.format(repo.name))
# -----------------helper mock functions--------------------
def mock_retrieve_repo(self, url):
if "non_fork" in url:
return self.retrieved_repos_return[0]
elif "parent" in url:
return self.retrieved_repos_return[1]["parent"]
else:
return self.retrieved_repos_return[1]
def mock_retrieve_pulls(self, url, state):
pulls = [
{
"html_url": "https://localhost/parent/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
if "parent" in url:
return pulls
else:
pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
return pulls
if __name__ == '__main__':
unittest.main()
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram/marker/pattern/_bgcolorsrc.py
|
Python
|
mit
| 433
| 0.002309
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="histogram.marker.pattern", **kwargs
):
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs
)
|
unistra/django-sympa
|
docs/conf.py
|
Python
|
gpl-2.0
| 8,528
| 0.00598
|
# -*- coding: utf-8 -*-
#
# sympa documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 25 18:11:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sympa'
copyright = u'%s, Direction Informatique' % date.today().strftime("%Y")
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sympadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sympa.tex', u'sympa Documentation',
u'Direction Informatique', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sympa', u'sympa Documentation',
[u'Direction Informatique'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sympa', u'sympa Documentation',
u'Direction Informatique', 'sympa', 'One line description of project.',
'Miscellaneous'),
]
# Document
|
aldialimucaj/Streaker
|
setup.py
|
Python
|
mit
| 680
| 0
|
from distutils.core import setup
setup(
# Application name:
name="streaker",
# Version number (initial):
version="0.0.1",
# Application author details:
author="Aldi Alimucaj",
author_email="aldi.alimucaj@gmail.com",
# Packages
packages=["streaker"],
scripts=['bin/streaker'],
# Include additional files into the package
    include_package_data=True,
# Details
url="http://pypi.python.org/pypi/Streaker_v001/",
#
license="MIT",
description="GitHub streak manipulator",
# long_description=open("README.txt").read(),
# Dependent packages (distributions)
install_requires=[
# "",
],
)
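# Editorial usage note: as a distutils-based setup script, this would
# typically be driven with commands such as `python setup.py sdist` or
# `python setup.py install`.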
|
syleam/document_csv
|
wizard/launch.py
|
Python
|
gpl-3.0
| 3,320
| 0.002711
|
# -*- coding: utf-8 -*-
##############################################################################
#
# document_csv module for OpenERP, Import structure in CSV
# Copyright (C) 2011 SYLEAM (<http://www.syleam.fr/>)
# Christophe CHAUVET <christophe.chauvet@syleam.fr>
# Copyright (C) 2011 Camptocamp (http://www.camptocamp.com)
# Guewen Baconnier
#
# This file is a part of document_csv
#
# document_csv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# document_csv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class LaunchImport(osv.osv_memory):
_name = 'wizard.launch.import.csv'
_description = 'Interface to launch CSV import'
_rec_name = 'import_list'
def _import_list(self, cr, uid, context=None):
implist_obj = self.pool.get('document.import.list')
doc_ids = implist_obj.search(cr, uid, [('disable', '=', False)])
if doc_ids:
return [(x.id, x.name) for x in implist_obj.browse(cr, uid, doc_ids, context=context)]
return []
_columns = {
'import_list': fields.selection(_import_list, 'List', help='List of available import structure', required=True),
'import_file': fields.binary('Filename', required=True),
'lang_id': fields.many2one('res.lang', 'Language', help='Translation to update.'),
'email_result': fields.char('Email', size=256, help='Email to send notification when import is finished'),
}
def default_get(self, cr, uid, fields_list, context=None):
"""
Retrieve email for this user
"""
if context is None:
context = {}
res = super(LaunchImport, self).default_get(cr, uid, fields_list, context=context)
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
res['email_result'] = user.user_email or ''
if context.get('lang'):
res['lang_id'] = self.pool.get('res.lang').search(cr, uid, [('code', '=', context['lang'])], context=context)
return res
def launch_import(self, cr, uid, ids, context=None):
"""
Save file, and execute importation
"""
if context is None:
context = {}
cur = self.browse(cr, uid, ids[0], context=context)
ctx = context.copy()
if cur.lang_id:
ctx.update({'lang': cur.lang_id.code})
self.pool.get('ir.attachment').import_csv(cr, uid, int(cur.import_list), cur.import_file, cur.email_result, context=ctx)
return {'type': 'ir.actions.act_window_close'}
LaunchImport()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
texta-tk/texta
|
utils/word_cluster.py
|
Python
|
gpl-3.0
| 2,505
| 0.007186
|
from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
class WordCluster(object):
"""
WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.
: param embedding : Word2Vec object
    : param n_clusters, int, number of clusters in output
"""
def __init__(self):
self.word_to_cluster_dict = {}
self.cluster_dict = {}
def cluster(self, embedding, n_clusters=None):
vocab = list(embedding.wv.vocab.keys())
vocab_vectors = np.array([embedding[word] for word in vocab])
if not n_clusters:
# number of clusters = 10% of embedding vocabulary
            # if larger than 1000, limit to 1000
n_clusters = int(len(vocab) * 0.1)
if n_clusters > 1000:
n_clusters = 1000
clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
cluster_labels = clustering.labels_
for i,cluster_label in enumerate(cluster_labels):
word = vocab[i]
etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
if etalon not in self.cluster_dict:
self.cluster_dict[etalon] = []
self.cluster_dict[etalon].append(word)
self.word_to_cluster_dict[word] = etalon
return True
def query(self, word):
try:
return self.cluster_dict[self.word_to_cluster_dict[word]]
except:
return []
def text_to_clusters(self, text):
text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
return ' '.join(text)
def save(self, file_path):
try:
data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
with open(file_path, 'w') as fh:
fh.write(json.dumps(data))
return True
except:
return False
def load(self, unique_id, task_type='train_tagger'):
file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
try:
with open(file_path) as fh:
data = json.loads(fh.read())
            self.cluster_dict = data["cluster_dict"]
            self.word_to_cluster_dict = data["word_to_cluster_dict"]
            return True
        except:
            return False
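# Minimal usage sketch (editorial; `embedding` stands for a trained gensim
# Word2Vec model and is hypothetical here):
#   wc = WordCluster()
#   wc.cluster(embedding, n_clusters=100)
#   wc.query('dog')                        # words in the same cluster as 'dog'
#   wc.text_to_clusters(['dog', 'cat'])    # 'etalon_a etalon_b'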
|
ardi69/pyload-0.4.10
|
pyload/plugin/extractor/UnZip.py
|
Python
|
gpl-3.0
| 1,891
| 0.00899
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import zipfile
from pyload.plugin.Extractor import Extractor, ArchiveError, CRCError, PasswordError
from pyload.utils import fs_encode
class UnZip(Extractor):
__name = "UnZip"
__type = "extractor"
__version = "1.12"
__description = """Zip extractor plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "vuolter@gmail.com")]
EXTENSIONS = [".zip", ".zip64"]
NAME = __name__.rsplit('.', 1)[1]
VERSION = "(python %s.%s.%s)" % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def isUsable(cls):
return sys.version_info[:2] >= (2, 6)
def list(self, password=None):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
return z.namelist()
def check(self, password):
pass
def verify(self):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
raise PasswordError
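    # Editorial note: ZipFile.testzip() returns the name of the first bad
    # member or None; verify() therefore reports a CRC failure when a bad
    # member exists and otherwise falls through to PasswordError, treating a
    # clean-testing archive as a password problem.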
def extract(self, password=None):
try:
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
z.extractall(self.out)
except (zipfile.BadZipfile, zipfile.LargeZipFile), e:
raise ArchiveError(e)
        except RuntimeError, e:
if "encrypted" in e:
raise PasswordError
else:
raise ArchiveError(e)
else:
self.files = z.namelist()
|
menghanY/LeetCode-Python
|
LinkedList/LinkedListCycleII.py
|
Python
|
mit
| 474
| 0.018987
|
# https://leetcode.com/problems/linked-list-cycle-ii/
from ListNode import ListNode
class Solution(object):
def detectCycle(self, head):
        slow, fast = head, head
        while True:
            if fast is None or fast.next is None: return None
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                break
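        # Editorial note: if the head-to-entry distance is a and the meeting
        # point lies b steps into a cycle of length c, the fast pointer
        # covered 2(a+b) = a+b+k*c, so a+b = k*c; advancing one pointer from
        # head and one from the meeting point, one step at a time, therefore
        # makes them collide exactly at the cycle entry.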
while head != fast:
head = head.next
fast = fast.next
return head
|
jkitchin/pycse
|
pycse/tests/test_lisp.py
|
Python
|
gpl-2.0
| 720
| 0.013889
|
from pycse.lisp import *
def test_symbol():
assert Symbol('setf').lisp == 'setf'
def test_quote():
assert Quote('setf').lisp == "'setf"
def test_sharpquote():
assert SharpQuote('setf').lisp == "#'setf"
def test_cons():
assert Cons('a', 3).lisp == '("a" . 3)'
def test_Alist():
assert Alist(["a", 1, "b", 2]).lisp == '(("a" . 1) ("b" . 2))'
def test_vector():
assert Vector(["a", 1, 3]).lisp == '["a" 1 3]'
def test_Comma():
assert Comma(Symbol("setf")).lisp == ',setf'
def test_splice():
assert Splice([1, 3]).lisp == ',@(1 3)'
def test_backquote():
    assert Backquote([Symbol("a"), 1]).lisp == '`(a 1)'
def test_comment():
assert Comment(Symbol("test")).lisp == '; test'
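# Editorial note: taken together these tests document pycse.lisp's rendering
# rules: Quote/SharpQuote prefix ' and #', Cons and Alist emit dotted pairs,
# Vector uses square brackets, and Backquote/Comma/Splice mirror Emacs Lisp
# quasiquotation.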
|
hzlf/openbroadcast
|
website/urls_api.py
|
Python
|
gpl-3.0
| 1,715
| 0.002915
|
from django.conf.urls.defaults import *
from tastypie.api import Api
#from tastytools.api import Api
from base.api import BaseResource
from bcmon.api import PlayoutResource as BcmonPlayoutResource
from bcmon.api import ChannelResource as BcmonChannelResource
from alibrary.api import MediaResource, ReleaseResource, ArtistResource, LabelResource, SimplePlaylistResource, PlaylistResource, PlaylistItemPlaylistResource
from importer.api import ImportResource, ImportFileResource
from exporter.api import ExportResource, ExportItemResource
from abcast.api import StationResource, ChannelResource, JingleResource, JingleSetResource, EmissionResource
from abcast.api import BaseResource as AbcastBaseResource
from istats.api import StatsResource
from fluent_comments.api import CommentResource
api = Api()
# base
api.register(BaseResource())
# bcmon
api.register(BcmonPlayoutResource())
api.register(BcmonChannelResource())
# library
api.register(MediaResource())
api.register(ReleaseResource())
api.register(ArtistResource())
api.register(LabelResource())
api.register(SimplePlaylistResource())
api.register(PlaylistResource())
api.register(PlaylistItemPlaylistResource())
# importer
api.register(ImportResource())
api.register(ImportFileResource())
# exporter
api.register(ExportResource())
api.register(ExportItemResource())
# abcast
api.register(AbcastBaseResource())
api.register(StationResource())
api.register(ChannelResource())
api.register(JingleResource())
api.register(JingleSetResource())
### scheduler
api.register(EmissionResource())
# comment
api.register(CommentResource())
# server stats
api.register(StatsResource())
"""
urlpatterns = patterns('',
(r'^', include(api.urls)),
)
"""
|
LukeMurphey/splunk-network-tools
|
src/bin/network_tools_app/portscan.py
|
Python
|
apache-2.0
| 3,454
| 0.003185
|
import socket
import sys
import threading
try:
from Queue import Queue, Empty
except:
from queue import Queue, Empty
from collections import OrderedDict
from . import parseintset
DEFAULT_THREAD_LIMIT = 200
CLOSED_STATUS = 'closed'
OPEN_STATUS = 'open'
if sys.version_info.major >= 3:
unicode = str
class Scanner(threading.Thread):
def __init__(self, input_queue, output_queue, timeout=5):
threading.Thread.__init__(self)
# These are the scan queues
self.input_queue = input_queue
self.output_queue = output_queue
self.keep_running = True
self.timeout = timeout
def run(self):
# This loop will exit when the input_queue generates an exception because all of the threads
# are complete
while self.keep_running:
try:
host, port = self.input_queue.get(timeout=5)
except Empty:
continue
# Make the socket for performing the scan
sock_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_instance.settimeout(self.timeout)
try:
# Connect to the host via TCP
sock_instance.connect((host, port))
except socket.error:
# Note that it is in the closed state
                self.output_queue.put((host, port, CLOSED_STATUS))
else:
# Note that it is in the open state
self.output_queue.put((host, port, OPEN_STATUS))
sock_instance.close()
self.input_queue.task_done()
self.output_queue.task_done()
def stop_running(self):
self.keep_running = False
def port_scan(host, ports, thread_count=DEFAULT_THREAD_LIMIT, callback=None, timeout=5):
# Parse the ports if necessary
if isinstance(ports, (str, unicode)):
parsed_ports = parseintset.parseIntSet(ports)
else:
parsed_ports = ports
# Setup the queues
to_scan = Queue()
scanned = Queue()
# Prepare the scanners
# These scanners will monitor the input queue for new things to scan, scan them, and them put
# them in the output queue
scanners = [Scanner(to_scan, scanned, timeout) for i in range(min(thread_count,len(ports)))]
for scanner in scanners:
scanner.start()
# Create the list of host ports to scan
host_ports = [(host, port) for port in parsed_ports]
for host_port in host_ports:
to_scan.put(host_port)
# This will store the list of successfully executed host/port combiations
results = {}
# This will contain the resulting data
data = []
for host, port in host_ports:
while (host, port) not in results:
# Get the queued thread: this will block if necessary
scanned_host, scanned_port, scan_status = scanned.get()
# Log that that we performed the scan
results[(scanned_host, scanned_port)] = scan_status
# Append the data
data.append(OrderedDict({
'dest' : scanned_host,
'port' : 'TCP\\' + str(scanned_port),
'status': scan_status
}))
# Run the callback if one is present
if callback is not None:
callback(scanned_host, scanned_port, scan_status)
# Stop the threads
for scanner in scanners:
scanner.stop_running()
return data
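# Minimal usage sketch (editorial; host and port list are placeholder values):
#   results = port_scan('127.0.0.1', '22,80,443', timeout=2)
#   for row in results:
#       print(row['dest'], row['port'], row['status'])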
|
SmartDeveloperHub/agora-fountain
|
agora/fountain/vocab/onto.py
|
Python
|
apache-2.0
| 4,018
| 0.000249
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
  This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import StringIO
import urlparse
from rdflib import Graph, RDF
from rdflib.namespace import OWL
from rdflib.plugins.parsers.notation3 import BadSyntax
import agora.fountain.vocab.schema as sch
__author__ = 'Fernando Serena'
class VocabularyException(Exception):
pass
class DuplicateVocabulary(VocabularyException):
pass
class VocabularyNotFound(VocabularyException):
pass
class UnknownVocabulary(VocabularyException):
pass
def __load_owl(owl):
"""
:param owl:
:return:
"""
owl_g = Graph()
for f in ['turtle', 'xml']:
try:
owl_g.parse(source=StringIO.StringIO(owl), format=f)
break
except SyntaxError:
pass
if not len(owl_g):
raise VocabularyException()
try:
uri = list(owl_g.subjects(RDF.type, OWL.Ontology)).pop()
vid = [p for (p, u) in owl_g.namespaces() if uri in u and p != '']
imports = owl_g.objects(uri, OWL.imports)
if not len(vid):
vid = urlparse.urlparse(uri).path.split('/')[-1]
else:
vid = vid.pop()
return vid, uri, owl_g, imports
except IndexError:
raise VocabularyNotFound()
def add_vocabulary(owl):
"""
:param owl:
:return:
"""
vid, uri, owl_g, imports = __load_owl(owl)
if vid in sch.contexts():
raise DuplicateVocabulary('Vocabulary already contained')
sch.add_context(vid, owl_g)
vids = [vid]
# TODO: Import referenced ontologies
for im_uri in imports:
print im_uri
im_g = Graph()
try:
im_g.load(im_uri, format='turtle')
except BadSyntax:
try:
im_g.load(im_uri)
except BadSyntax:
print 'bad syntax in {}'.format(im_uri)
try:
child_vids = add_vocabulary(im_g.serialize(format='turtle'))
vids.extend(child_vids)
except DuplicateVocabulary, e:
print 'already added'
except VocabularyNotFound, e:
print 'uri not found for {}'.format(im_uri)
except Exception, e:
print e.message
return vids
def update_vocabulary(vid, owl):
"""
:param vid:
:param owl:
:return:
"""
owl_vid, uri, owl_g, imports = __load_owl(owl)
if vid != owl_vid:
raise Exception("Identifiers don't match")
if vid not in sch.contexts():
raise UnknownVocabulary('Vocabulary id is not known')
sch.update_context(vid, owl_g)
def delete_vocabulary(vid):
"""
:param vid:
:return:
"""
if vid not in sch.contexts():
raise UnknownVocabulary('Vocabulary id is not known')
sch.remove_context(vid)
def get_vocabularies():
"""
:return:
"""
return sch.contexts()
def get_vocabulary(vid):
"""
:param vid:
:return:
"""
return sch.get_context(vid).serialize(format='turtle')
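# Minimal usage sketch (editorial; `ttl` stands for a turtle-serialized
# ontology string and is hypothetical here):
#   vids = add_vocabulary(ttl)        # registers the ontology and its imports
#   get_vocabularies()                # -> ids of the known vocabularies
#   update_vocabulary(vids[0], ttl)   # re-parses and replaces the stored graph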
|
credativUK/connector-magento
|
__unported__/magentoerpconnect/magento_model.py
|
Python
|
agpl-3.0
| 28,454
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
# Copyright 2013 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime, timedelta
from openerp.osv import fields, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.connector.connector import ConnectorUnit
from openerp.addons.connector.unit.mapper import (mapping,
only_create,
ImportMapper
)
from .unit.backend_adapter import GenericAdapter
from .unit.import_synchronizer import (import_batch,
DirectBatchImport,
MagentoImportSynchronizer,
AddCheckpoint,
)
from .partner import partner_import_batch
from .sale import sale_order_import_batch
from .backend import magento
from .connector import add_checkpoint
_logger = logging.getLogger(__name__)
IMPORT_DELTA_BUFFER = 30 # seconds
class magento_backend(orm.Model):
_name = 'magento.backend'
_description = 'Magento Backend'
_inherit = 'connector.backend'
_backend_type = 'magento'
def select_versions(self, cr, uid, context=None):
""" Available versions in the backend.
Can be inherited to add custom versions. Using this method
to add a version from an ``_inherit`` does not constrain
to redefine the ``version`` field in the ``_inherit`` model.
"""
return [('1.7', '1.7')]
def _select_versions(self, cr, uid, context=None):
""" Available versions in the backend.
        If you want to add a version, do not override this
        method, but ``select_versions``.
"""
return self.select_versions(cr, uid, context=context)
def _get_stock_field_id(self, cr, uid, context=None):
field_ids = self.pool.get('ir.model.fields').search(
cr, uid,
[('model', '=', 'product.product'),
('name', '=', 'virtual_available')],
context=context)
return field_ids[0]
_columns = {
'version': fields.selection(
_select_versions,
string='Version',
required=True),
'location': fields.char(
'Location',
required=True,
help="Url to magento application"),
'admin_location': fields.char('Admin Location'),
'use_custom_api_path': fields.boolean(
'Custom Api Path',
help="The default API path is '/index.php/api/xmlrpc'. "
"Check this box if you use a custom API path, in that case, "
"the location has to be completed with the custom API path "),
'username': fields.char(
'Username',
help="Webservice user"),
'password': fields.char(
'Password',
help="Webservice password"),
'use_auth_basic': fields.boolean(
'Use HTTP Auth Basic',
help="Use a Basic Access Authentication for the API. "
"The Magento server could be configured to restrict access "
"using a HTTP authentication based on a username and "
"a password."),
'auth_basic_username': fields.char(
'Basic Auth. Username',
help="Basic access authentication web server side username"),
'auth_basic_password': fields.char(
'Basic Auth. Password',
help="Basic access authentication web server side password"),
'sale_prefix': fields.char(
'Sale Prefix',
help="A prefix put before the name of imported sales orders.\n"
"For instance, if the prefix is 'mag-', the sales "
"order 100000692 in Magento, will be named 'mag-100000692' "
"in OpenERP."),
'warehouse_id': fields.many2one('stock.warehouse',
'Warehouse',
required=True,
                                        help='Warehouse used to compute the '
'stock quantities.'),
'website_ids': fields.one2many(
'magento.website', 'backend_id',
string='Website', readonly=True),
'default_lang_id': fields.many2one(
'res.lang',
'Default Language',
help="If a default language is selected, the records "
"will be imported in the translation of this language.\n"
"Note that a similar configuration exists "
"for each storeview."),
'default_category_id': fields.many2one(
'product.category',
string='Default Product Category',
help='If a default category is selected, products imported '
'without a category will be linked to it.'),
# add a field `auto_activate` -> activate a cron
'import_products_from_date': fields.datetime(
'Import products from date'),
'import_categories_from_date': fields.datetime(
'Import categories from date'),
'catalog_price_tax_included': fields.boolean('Prices include tax'),
'product_stock_field_id': fields.many2one(
'ir.model.fields',
string='Stock Field',
domain="[('model', 'in', ['product.product', 'product.template']),"
" ('ttype', '=', 'float')]",
help="Choose the field of the product which will be used for "
"stock inventory updates.\nIf empty, Quantity Available "
"is used."),
'product_binding_ids': fields.one2many('magento.product.product',
'backend_id',
string='Magento Products',
readonly=True),
}
_defaults = {
'product_stock_field_id': _get_stock_field_id,
'use_custom_api_path': False,
'use_auth_basic': False,
}
_sql_constraints = [
('sale_prefix_uniq', 'unique(sale_prefix)',
"A backend with the same sale prefix already exists")
]
def check_magento_structure(self, cr, uid, ids, context=None):
""" Used in each data import.
Verify if a website exists for each backend before starting the import.
"""
for backend_id in ids:
website_ids = self.pool['magento.website'].search(
cr, uid, [('backend_id', '=', backend_id)], context=context)
if not website_ids:
self.synchronize_metadata(cr, uid, backend_id, context=context)
return True
def synchronize_metadata(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
session = ConnectorSession(cr, uid, context=context)
for backend_id in ids:
for model in ('magento.website',
'magento.store',
'magento.storeview'):
|
kyonetca/onionshare
|
test/onionshare_web_test.py
|
Python
|
gpl-3.0
| 1,129
| 0
|
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014 Micah Lee <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from onionshare import web
from nose import with_setup
def test_generate_slug_length():
"""ge
|
nerates a 26-character slug"""
assert len(web.slug) == 26
def test_generate_slug_characters():
"""generates a base32-encoded slug"""
def is_b32(string):
        b32_alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
return all(char in b32_alphabet for char in string)
assert is_b32(web.slug)
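# Note (added for context, not in the original source): a 26-character slug is
# consistent with base32-encoding 16 random bytes, since ceil(16 * 8 / 5.0) == 26
# data characters once padding is stripped, which is what the tests above check.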
|
themaxx75/lapare-bijoux
|
lapare.ca/lapare/settings/base.py
|
Python
|
bsd-3-clause
| 2,795
| 0.000716
|
import sys
from os.path import join, abspath, dirname
# PATH vars
here = lambda *x: join(abspath(dirname(__file__)), *x)
PROJECT_ROOT = here("..")
root = lambda *x: join(abspath(PROJECT_ROOT), *x)
sys.path.insert(0, root('apps'))
ADMINS = (
('Maxime Lapointe', 'maxx@themaxx.ca'),
)
MANAGERS = ADMINS
SHELL_PLUS = 'ipython'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'www',
)
PROJECT_APPS = ()
INSTALLED_APPS += PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'lapare.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'lapare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '../www_lapare_ca.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'fr-CA'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = root('assets', 'uploads')
MEDIA_URL = '/media/'
# Additional locations of static files
STATICFILES_DIRS = (
root('assets'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = ('django.contrib.staticfiles.storage.'
'ManifestStaticFilesStorage')
TEMPLATE_DIRS = (
root('templates'),
)
# .local.py overrides all the common settings.
try:
from .local import *
except ImportError:
from .production import *
# importing test settings file if necessary
if len(sys.argv) > 1 and 'test' in sys.argv[1]:
from .testing import *
|
tlevine/alot
|
alot/commands/thread.py
|
Python
|
gpl-3.0
| 42,235
| 0
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import os
import re
import logging
import tempfile
import argparse
from twisted.internet.defer import inlineCallbacks
import subprocess
from email.Utils import getaddresses, parseaddr
from email.message import Message
import mailcap
from cStringIO import StringIO
from alot.commands import Command, registerCommand
from alot.commands.globals import ExternalCommand
from alot.commands.globals import FlushCommand
from alot.commands.globals import ComposeCommand
from alot.commands.globals import MoveCommand
from alot.commands.globals import CommandCanceled
from alot.commands.envelope import SendCommand
from alot import completion
from alot.db.utils import decode_header
from alot.db.utils import encode_header
from alot.db.utils import extract_headers
from alot.db.utils import extract_body
from alot.db.envelope import Envelope
from alot.db.attachment import Attachment
from alot.db.errors import DatabaseROError
from alot.settings import settings
from alot.helper import parse_mailcap_nametemplate
from alot.helper import split_commandstring
from alot.helper import email_as_string
from alot.utils.booleanaction import BooleanAction
from alot.completion import ContactsCompleter
from alot.widgets.globals import AttachmentWidget
MODE = 'thread'
def determine_sender(mail, action='reply'):
"""
Inspect a given mail to reply/forward/bounce and find the most appropriate
account to act from and construct a suitable From-Header to use.
:param mail: the email to inspect
:type mail: `email.message.Message`
:param action: intended use case: one of "reply", "forward" or "bounce"
:type action: str
"""
assert action in ['reply', 'forward', 'bounce']
realname = None
address = None
# get accounts
my_accounts = settings.get_accounts()
assert my_accounts, 'no accounts set!'
# extract list of addresses to check for my address
# X-Envelope-To and Envelope-To are used to store the recipient address
# if not included in other fields
candidate_addresses = getaddresses(mail.get_all('To', []) +
mail.get_all('Cc', []) +
mail.get_all('Delivered-To', []) +
mail.get_all('X-Envelope-To', []) +
mail.get_all('Envelope-To', []) +
mail.get_all('From', []))
logging.debug('candidate addresses: %s' % candidate_addresses)
# pick the most important account that has an address in candidates
    # and use that account's realname and the address found here
for account in my_accounts:
acc_addresses = account.get_addresses()
for alias in acc_addresses:
if realname is not None:
break
regex = re.compile(re.escape(alias), flags=re.IGNORECASE)
for seen_name, seen_address in candidate_addresses:
if regex.match(seen_address):
logging.debug("match!: '%s' '%s'" % (seen_address, alias))
if settings.get(action + '_force_realname'):
realname = account.realname
else:
realname = seen_name
if settings.get(action + '_force_address'):
address = account.address
else:
address = seen_address
# revert to default account if nothing found
if realname is None:
account = my_accounts[0]
realname = account.realname
address = account.address
logging.debug('using realname: "%s"' % realname)
logging.debug('using address: %s' % address)
from_value = address if realname == '' else '%s <%s>' % (realname, address)
return from_value, account
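# --- Illustrative call (hypothetical, added for context; mirrors how
# ReplyCommand.apply below uses the return value) ---
#   from_value, account = determine_sender(mail, action='reply')
#   envelope.add('From', from_value)   # e.g. 'Jane Doe <jane@example.com>'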
@registerCommand(MODE, 'reply', arguments=[
(['--all'], {'action': 'store_true', 'help': 'reply to all'}),
(['--spawn'], {'action': BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class ReplyCommand(Command):
"""reply to message"""
repeatable = True
def __init__(self, message=None, all=False, spawn=None, **kwargs):
"""
:param message: message to reply to (defaults to selected message)
:type message: `alot.db.message.Message`
:param all: group reply; copies recipients from Bcc/Cc/To to the reply
:type all: bool
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.groupreply = all
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
# get message to forward if not given in constructor
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# set body text
name, address = self.message.get_author()
timestamp = self.message.get_date()
qf = settings.get_hook('reply_prefix')
if qf:
quotestring = qf(name, address, timestamp, ui=ui, dbm=ui.dbman)
else:
quotestring = 'Quoting %s (%s)\n' % (name or address, timestamp)
mailcontent = quotestring
quotehook = settings.get_hook('text_quote')
if quotehook:
mailcontent += quotehook(self.message.accumulate_body())
else:
quote_prefix = settings.get('quote_prefix')
for line in self.message.accumulate_body().splitlines():
mailcontent += quote_prefix + line + '\n'
envelope = Envelope(bodytext=mailcontent)
# copy subject
subject = decode_header(mail.get('Subject', ''))
        reply_subject_hook = settings.get_hook('reply_subject')
if reply_subject_hook:
subject = reply_subject_hook(subject)
else:
rsp = settings.get('reply_subject_prefix')
if not subject.lower().startswith(('re:', rsp.lower())):
subject = rsp + subject
        envelope.add('Subject', subject)
# set From-header and sending account
try:
from_header, account = determine_sender(mail, 'reply')
except AssertionError as e:
ui.notify(e.message, priority='error')
return
envelope.add('From', from_header)
# set To
sender = mail['Reply-To'] or mail['From']
my_addresses = settings.get_addresses()
sender_address = parseaddr(sender)[1]
cc = ''
# check if reply is to self sent message
if sender_address in my_addresses:
recipients = [mail['To']]
emsg = 'Replying to own message, set recipients to: %s' \
% recipients
logging.debug(emsg)
else:
recipients = [sender]
if self.groupreply:
# make sure that our own address is not included
# if the message was self-sent, then our address is not included
MFT = mail.get_all('Mail-Followup-To', [])
followupto = self.clear_my_address(my_addresses, MFT)
if followupto and settings.get('honor_followup_to'):
logging.debug('honor followup to: %s', followupto)
recipients = [followupto]
# since Mail-Followup-To was set, ignore the Cc header
else:
if sender != mail['From']:
recipients.append(mail['From'])
# append To addresses if not replying to self sent message
if sender_address not in my_addresses:
cleared = self.clear_my_address(
my_addresses, mail.get_all('To', []))
recipients.append(cleared)
# copy cc for group-replies
if 'Cc' in mail:
cc = self.clear_my_address(
my_addresses,
|
karesansui/karesansui
|
karesansui/gadget/guesttag.py
|
Python
|
mit
| 2,641
| 0.004165
|
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS O
|
R
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
from karesansui.lib.rest import Rest, auth
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, CHECK_CHAR
from karesansui.lib.utils import is_param, json_dumps
from karesansui.db.access.tag import findbyhost1guestall
class GuestTag(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
tags = findbyhost1guestall(self.orm, host_id)
if not tags:
            self.logger.debug("No tags found.")
return web.notfound()
if self.is_part() is True:
self.view.tags = tags
machine_ids = {}
for tag in tags:
tag_id = str(tag.id)
machine_ids[tag_id] = []
for machine in tag.machine:
if not machine.is_deleted:
machine_ids[tag_id].append("tag_machine%s"% machine.id)
machine_ids[tag_id] = " ".join(machine_ids[tag_id])
self.view.machine_ids = machine_ids
return True
elif self.is_json() is True:
tags_json = []
for tag in tags:
tags_json.append(tag.get_json(self.me.languages))
self.view.tags = json_dumps(tags_json)
return True
else:
return web.nomethod()
urls = (
'/host/(\d+)/guest/tag/?(\.part|\.json)$', GuestTag,
)
|
mcr/ietfdb
|
ietf/wginfo/urls.py
|
Python
|
bsd-3-clause
| 2,177
| 0.009187
|
# Copyright The IETF Trust 2008, All Rights Reserved
from django.conf.urls.defaults import patterns, include
from ietf.wginfo import views, edit, milestones
from django.views.generic.simple import redirect_to
urlpatterns = patterns('',
(r'^$', views.wg_dir),
(r'^summary.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-area.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-acronym.txt', redirect_to, { 'url':'/wg/1wg-summary-by-acronym.txt' }),
(r'^1wg-summary.txt', views.wg_summary_area),
(r'^1wg-summary-by-acronym.txt', views.wg_summary_acronym),
(r'^1wg-charters.txt', views.wg_charters),
(r'^1wg-charters-by-acronym.txt', views.wg_charters_by_acronym),
(r'^chartering/$', views.chartering_wgs),
(r'^bofs/$', views.bofs),
(r'^chartering/create/$', edit.edit, {'action': "charter"}, "wg_create"),
(r'^bofs/create/$', edit.edit, {'action': "create"}, "bof_create"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/documents/txt/$', views.wg_documents_txt),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/$', views.wg_documents_html, None, "wg_docs"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/charter/$', views.wg_charter, None, 'wg_charter'),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/init-charter/', edit.submit_initial_charter, None, "wg_init_charter"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/history/$', views.history),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/edit/$', edit.edit, {'action': "edit"}, "wg_edit"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/conclude/$', edit.conclude, None, "wg_conclude"),
    (r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/$', milestones.edit_milestones, {'milestone_set': "current"}, "wg_edit_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/$', milestones.edit_milestones, {'milestone_set': "charter"}, "wg_edit_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/reset/$', milestones.reset_charter_milestones,
     None, "wg_reset_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/ajax/searchdocs/$', milestones.ajax_search_docs, None, "wg_ajax_search_docs"),
(r'^(?P<acronym>[^/]+)/management/', include('ietf.wgchairs.urls')),
)
|
modoboa/modoboa
|
modoboa/admin/api/v1/urls.py
|
Python
|
isc
| 548
| 0.001825
|
"""Admin API urls."""
from rest_framework import routers
from . import viewsets
router = routers.SimpleRouter()
router.register(r"domains", viewsets.DomainViewSet, basename="domain")
router.register(
r"domainaliases", v
|
iewsets.DomainAliasViewSet, basename="domain_alias")
router.register(r"accounts", viewsets.AccountViewSet, basename="account")
router.register(r"aliases", viewsets.AliasViewSet, basename="alias")
router.register(
r"senderaddresses", view
|
sets.SenderAddressViewSet, basename="sender_address")
urlpatterns = router.urls
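# Illustrative note (added, not in the original file): with DRF's SimpleRouter,
# each register() call above is expected to yield a list and a detail route, e.g.
#   ^domains/$        -> name='domain-list'
#   ^domains/{pk}/$   -> name='domain-detail'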
|
quarkslab/qb-sync
|
ext_ida/dispatcher.py
|
Python
|
gpl-3.0
| 14,364
| 0.00181
|
#
# Copyright (C) 2012-2014, Quarkslab.
#
# This file is part of qb-sync.
#
# qb-sync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import socket
import select
import base64
import binascii
import re
import ConfigParser
import traceback
HOST = 'localhost'
PORT = 9100
try:
import json
except:
print "[-] failed to import json\n%s" % repr(sys.exc_info())
sys.exit(0)
class Client():
def __init__(self, s_client, s_srv, name):
self.client_sock = s_client
self.srv_sock = s_srv
self.name = name
self.enabled = False
self.buffer = ''
def close(self):
self.enabled = False
if self.client_sock:
self.client_sock.close()
if self.srv_sock:
self.srv_sock.close()
def feed(self, data):
batch = []
self.buffer = ''.join([self.buffer, data])
if self.buffer.endswith("\n"):
batch = [req for req in self.buffer.strip().split('\n') if req != '']
self.buffer = ''
return batch
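    # Illustrative buffering behaviour (added comment, not in the original source):
    #   client.feed('par')           -> []                  (kept in self.buffer)
    #   client.feed('tial\nnext\n')  -> ['partial', 'next'] (buffer flushed)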
class DispatcherSrv():
def __init__(self):
self.idb_clients = []
self.dbg_client = None
self.srv_socks = []
self.opened_socks = []
self.current_dbg = None
self.current_dialect = 'unknown'
self.current_idb = None
self.current_module = None
self.sync_mode_auto = True
self.pat = re.compile('dbg disconnected')
self.req_handlers = {
'new_client': self.req_new_client,
'new_dbg': self.req_new_dbg,
'dbg_quit': self.req_dbg_quit,
'idb_n': self.req_idb_n,
'idb_list': self.req_idb_list,
'module': self.req_module,
'sync_mode': self.req_sync_mode,
'cmd': self.req_cmd,
'bc': self.req_bc,
'kill': self.req_kill
}
def bind(self, host, port):
self.dbg_srv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dbg_srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.dbg_srv_sock.bind((host, port))
self.srv_socks.append(self.dbg_srv_sock)
if not (socket.gethostbyname(host) == '127.0.0.1'):
self.localhost_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.localhost_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.localhost_sock.bind(('localhost', port))
self.srv_socks.append(self.localhost_sock)
def accept(self, s):
new_socket, addr = s.accept()
self.opened_socks.append(new_socket)
def listen(self):
for s in self.srv_socks:
s.listen(5)
def close(self, s):
s.close()
self.opened_socks.remove(s)
def loop(self):
self.listen()
self.announcement("dispatcher listening")
while True:
rlist, wlist, xlist = select.select(self.srv_socks + self.opened_socks, [], [])
if not rlist:
self.announcement("socket error: select")
raise Exception("rabbit eating the cable")
for s in rlist:
if s in self.srv_socks:
self.accept(s)
else:
self.handle(s)
def handle(self, s):
client = self.sock_to_client(s)
for req in self.recvall(client):
self.parse_exec(s, req)
# find client object for its srv socket
def sock_to_client(self, s):
if self.current_dbg and (s == self.current_dbg.srv_sock):
client = self.current_dbg
else:
clist = [client for client in self.idb_clients if (client.srv_sock == s)]
if not clist:
client = Client(None, s, None)
self.idb_clients.append(client)
else:
client = clist[0]
return client
# buffered readline like function
def recvall(self, client):
try:
data = client.srv_sock.recv(4096)
if data == '':
raise
except:
if client == self.current_dbg:
self.broadcast("debugger closed the connection")
self.dbg_quit()
else:
self.client_quit(client.srv_sock)
self.broadcast("a client quit, nb client(s) left: %d" % len(self.idb_clients))
return []
return client.feed(data)
# parse and execute requests from clients (idbs or dbg)
def parse_exec(self, s, req):
if not (req[0:8] == '[notice]'):
# this is a normal [sync] request from debugger, forward it
self.forward(req)
# receive 'dbg disconnected', socket can be closed
if re.search(self.pat, req):
self.close(s)
return
req = self.normalize(req, 8)
try:
hash = json.loads(req)
except:
print "[-] dispatcher failed to parse json\n %s\n" % req
return
type = hash['type']
if not type in self.req_handlers:
print ("[*] dispatcher unknown request: %s" % type)
return
req_handler = self.req_handlers[type]
req_handler(s, hash)
def normalize(self, req, taglen):
req = req[taglen:]
req = req.replace("\\", "\\\\")
req = req.replace("\n", "")
return req
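    # Illustrative (added comment, not in the original source):
    #   normalize('[notice]{"type":"new_client"}\n', 8)
    #   -> '{"type":"new_client"}'  (tag stripped, backslashes doubled, newline removed)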
def puts(self, msg, s):
s.sendall(msg)
# dispatcher announcements are forwarded to the idb
def announcement(self, msg, s=None):
if not s:
if not self.current_idb:
return
s = self.current_idb.client_sock
try:
s.sendall("[notice]{\"type\":\"dispatcher\",\"subtype\":\"msg\",\"msg\":\"%s\"}\n" % msg)
except:
return
# send message to all connected idb clients
def broadcast(self, msg):
for idbc in self.idb_clients:
self.announcement(msg, idbc.client_sock)
# send dbg message to currently active idb client
def forward(self, msg, s=None):
if not s:
if not self.current_idb:
return
s = self.current_idb.client_sock
if s:
s.sendall(msg + "\n")
# send dbg message to all idb clients
def forward_all(self, msg, s=None):
for idbc in self.idb_clients:
self.forward(msg, idbc.client_sock)
# disable current idb and enable new idb matched from current module name
def switch_idb(self, new_idb):
msg = "[sync]{\"type\":\"broker\",\"subtype\":\"%s\"}\n"
if (not self.current_idb == new_idb) & (self.current_idb.enabled):
self.current_idb.client_sock.sendall(msg % "disable_idb")
self.current_idb.enabled = False
if new_idb:
new_idb.client_sock.sendall(msg % "enable_idb")
self.current_idb = new_idb
new_idb.enabled = True
# a new idb client connects to the dispatcher via its broker
def req_new_client(self, srv_sock, hash):
port, name = hash['port'], hash['idb']
try:
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.connect(('localhost', port))
self.opened_socks.append(client_sock)
except:
self.opened_socks.remove(srv_sock)
srv_sock.close()
return
# check if an idb client is already registered with the same name
conflicting = [cli
|
Turivniy/Python_koans
|
python2/koans/about_methods.py
|
Python
|
mit
| 5,806
| 0.001206
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMethods in the Ruby Koans
#
from runner.koan import *
def my_global_function(a, b):
return a + b
class AboutMethods(Koan):
def test_calling_a_global_function(self):
self.assertEqual(5, my_global_function(2, 3))
    # NOTE: Wrong number of arguments is not a SYNTAX error, but a
# runtime error.
    def test_calling_functions_with_wrong_number_of_arguments(self):
try:
my_global_function()
except Exception as exception:
# NOTE: The .__name__ attribute will convert the class
# into a string value.
            self.assertEqual('TypeError', exception.__class__.__name__)
self.assertMatch(
r'my_global_function\(\) takes exactly 2 arguments \(0 given\)',
exception[0])
try:
my_global_function(1, 2, 3)
except Exception as e:
# Note, watch out for parenthesis. They need slashes in front!
self.assertMatch(r'my_global_function\(\) takes exactly 2 arguments \(3 given\)', e[0])
# ------------------------------------------------------------------
def pointless_method(self, a, b):
sum = a + b
def test_which_does_not_return_anything(self):
self.assertEqual(None, self.pointless_method(1, 2))
# Notice that methods accessed from class scope do not require
# you to pass the first "self" argument?
# ------------------------------------------------------------------
def method_with_defaults(self, a, b='default_value'):
return [a, b]
def test_calling_with_default_values(self):
self.assertEqual([1, 'default_value'], self.method_with_defaults(1))
self.assertEqual([1, 2], self.method_with_defaults(1, 2))
# ------------------------------------------------------------------
def method_with_var_args(self, *args):
return args
def test_calling_with_variable_arguments(self):
self.assertEqual((), self.method_with_var_args())
self.assertEqual(('one', ), self.method_with_var_args('one'))
self.assertEqual(('one', 'two'), self.method_with_var_args('one', 'two'))
# ------------------------------------------------------------------
def function_with_the_same_name(self, a, b):
return a + b
def test_functions_without_self_arg_are_global_functions(self):
def function_with_the_same_name(a, b):
return a * b
self.assertEqual(12, function_with_the_same_name(3, 4))
def test_calling_methods_in_same_class_with_explicit_receiver(self):
def function_with_the_same_name(a, b):
return a * b
self.assertEqual(7, self.function_with_the_same_name(3, 4))
# ------------------------------------------------------------------
def another_method_with_the_same_name(self):
return 10
link_to_overlapped_method = another_method_with_the_same_name
def another_method_with_the_same_name(self):
return 42
def test_that_old_methods_are_hidden_by_redefinitions(self):
self.assertEqual(42, self.another_method_with_the_same_name())
def test_that_overlapped_method_is_still_there(self):
self.assertEqual(10, self.link_to_overlapped_method())
# ------------------------------------------------------------------
def empty_method(self):
pass
def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
self.assertEqual(None, self.empty_method())
def test_pass_does_nothing_at_all(self):
"You"
"shall"
"not"
pass
self.assertEqual(True, "Still got to this line" != None)
# ------------------------------------------------------------------
def one_line_method(self): return 'Madagascar'
def test_no_indentation_required_for_one_line_statement_bodies(self):
self.assertEqual('Madagascar', self.one_line_method())
# ------------------------------------------------------------------
def method_with_documentation(self):
"A string placed at the beginning of a function is used for documentation"
return "ok"
def test_the_documentation_can_be_viewed_with_the_doc_method(self):
self.assertMatch("A string placed at the beginning of a function is used for documentation", self.method_with_documentation.__doc__)
# ------------------------------------------------------------------
class Dog(object):
def name(self):
return "Fido"
def _tail(self):
# Prefixing a method with an underscore implies private scope
return "wagging"
def __password(self):
return 'password' # Genius!
def test_calling_methods_in_other_objects(self):
rover = self.Dog()
self.assertEqual('Fido', rover.name())
def test_private_access_is_implied_but_not_enforced(self):
rover = self.Dog()
# This is a little rude, but legal
self.assertEqual('wagging', rover._tail())
def test_double_underscore_attribute_prefixes_cause_name_mangling(self):
"""Attributes names that start with a double underscore get
mangled when an instance is created."""
rover = self.Dog()
try:
#This may not be possible...
password = rover.__password()
except Exception as ex:
self.assertEqual('AttributeError', ex.__class__.__name__)
# But this still is!
self.assertEqual('password', rover._Dog__password())
# Name mangling exists to avoid name clash issues when subclassing.
# It is not for providing effective access protection
|
andreaso/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_permissions.py
|
Python
|
gpl-3.0
| 10,061
| 0.002286
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_permissions
short_description: "Module to manage permissions of users/groups in oVirt/RHV"
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage permissions of users/groups in oVirt/RHV"
options:
role:
description:
- "Name of the role to be assigned to user/group on specific object."
default: UserRole
state:
description:
- "Should the permission be present/absent."
choices: ['present', 'absent']
default: present
object_id:
description:
- "ID of the object where the permissions should be managed."
object_name:
description:
- "Name of the object where the permissions should be managed."
object_type:
description:
- "The object where the permissions should be managed."
default: 'vm'
choices: [
'data_center',
'cluster',
'host',
'storage_domain',
'network',
'disk',
'vm',
'vm_pool',
'template',
'cpu_profile',
'disk_profile',
'vnic_profile',
'system',
]
user_name:
description:
- "Username of the user to manage. In most LDAPs it's I(uid) of the user,
but in Active Directory you must specify I(UPN) of the user."
- "Note that if user don't exist in the system this module will fail,
you should ensure the user exists by using M(ovirt_users) module."
group_name:
description:
- "Name of the group to manage."
- "Note that if group don't exist in the system this module will fail,
you should ensure the group exists by using M(ovirt_groups) module."
authz_name:
description:
- "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where user/group resides."
required: false
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add user user1 from authorization provider example.com-authz
- ovirt_permissions:
user_name: user1
authz_name: example.com-authz
object_type: vm
object_name: myvm
role: UserVmManager
# Remove permission from user
- ovirt_permissions:
state: absent
user_name: user1
authz_name: example.com-authz
object_type: cluster
object_name: mycluster
role: ClusterAdmin
'''
RETURN = '''
id:
description: ID of the permission which is managed
returned: On success if permission is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
permission:
description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
returned: On success if permission is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
follow_link,
get_link_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
)
def _objects_service(connection, object_type):
    if object_type == 'system':
return connection.system_service()
return getattr(
connection.system_service(),
'%ss_service' % object_type,
None,
)()
def _object_service(connection, module):
object_type = module.params['object_type']
objects_service = _objects_service(connection, object_type)
    if object_type == 'system':
return objects_service
object_id = module.params['object_id']
if object_id is None:
sdk_object = search_by_name(objects_service, module.params['object_name'])
if sdk_object is None:
raise Exception(
"'%s' object '%s' was not found." % (
module.params['object_type'],
module.params['object_name']
)
)
object_id = sdk_object.id
return objects_service.service(object_id)
def _permission(module, permissions_service, connection):
for permission in permissions_service.list():
user = follow_link(connection, permission.user)
if (
equal(module.params['user_name'], user.principal if user else None) and
equal(module.params['group_name'], get_link_name(connection, permission.group)) and
equal(module.params['role'], get_link_name(connection, permission.role))
):
return permission
class PermissionsModule(BaseModule):
def _user(self):
user = search_by_attributes(
self._connection.system_service().users_service(),
usrname="{name}@{authz_name}".format(
name=self._module.params['user_name'],
authz_name=self._module.params['authz_name'],
),
)
if user is None:
raise Exception("User '%s' was not found." % self._module.params['user_name'])
return user
def _group(self):
groups = self._connection.system_service().groups_service().list(
search="name={name}".format(
name=self._module.params['group_name'],
)
)
# If found more groups, filter them by namespace and authz name:
# (filtering here, as oVirt/RHV backend doesn't support it)
if len(groups) > 1:
groups = [
g for g in groups if (
equal(self._module.params['namespace'], g.namespace) and
equal(self._module.params['authz_name'], g.domain.name)
)
]
if not groups:
raise Exception("Group '%s' was not found." % self._module.params['group_name'])
return groups[0]
def build_entity(self):
entity = self._group() if self._module.params['group_name'] else self._user()
return otypes.Permission(
user=otypes.User(
id=entity.id
) if self._module.params['user_name'] else None,
group=otypes.Group(
id=entity.id
) if self._module.params['group_name'] else None,
role=otypes.Role(
name=self._module.params['role']
),
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
role=dict(default='UserRole'),
object_type=dict(
default='vm',
choices=[
'data_center',
'cluster',
'host',
'storage_domain',
'network',
'disk',
|
kbdancer/TPLINKKEY
|
scan.py
|
Python
|
mit
| 5,658
| 0.002301
|
#!/usr/bin/env python
# coding=utf-8
# code by kbdancer@92ez.com
from threading import Thread
from telnetlib import Telnet
import requests
import sqlite3
import queue
import time
import sys
import os
def ip2num(ip):
ip = [int(x) for x in ip.split('.')]
return ip[0] << 24 | ip[1] << 16 | ip[2] << 8 | ip[3]
def num2ip(num):
return '%s.%s.%s.%s' % ((num & 0xff000000) >> 24, (num & 0x00ff0000) >> 16, (num & 0x0000ff00) >> 8, num & 0x000000ff)
def ip_range(start, end):
    return [num2ip(num) for num in range(ip2num(start), ip2num(end) + 1) if num & 0xff]
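# Illustrative (added, not in the original script): the `num & 0xff` filter above
# drops network addresses ending in .0, e.g.
#   ip_range('192.168.0.254', '192.168.1.2')
#   -> ['192.168.0.254', '192.168.0.255', '192.168.1.1', '192.168.1.2']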
class Database:
db = sys.path[0] + "/TPLINK_KEY.db"
charset = 'utf8'
def __init__(self):
self.connection = sqlite3.connect(self.db)
self.connection.text_factory = str
self.cursor = self.connection.cursor()
def insert(self, query, params):
try:
self.cursor.execute(query, params)
self.connection.commit()
except Exception as e:
print(e)
self.connection.rollback()
def update(self, query, params):
try:
self.cursor.execute(query, params)
self.connection.commit()
except Exception as e:
print(e)
self.connection.rollback()
def query(self, query, params):
cursor = self.connection.cursor()
cursor.execute(query, params)
return cursor.fetchall()
def __del__(self):
self.connection.close()
def b_thread(ip_address_list):
thread_list = []
queue_list = queue.Queue()
hosts = ip_address_list
for host in hosts:
queue_list.put(host)
for x in range(0, int(sys.argv[1])):
thread_list.append(tThread(queue_list))
    for t in thread_list:
try:
t.daemon = True
t.start()
except Exception as e:
print(e)
for t in thread_list:
t.join()
class tThread(Thread):
def __init__(self, queue_obj):
Thread.__init__(self)
self.queue = queue_obj
def run(self):
while not self.queue.empty():
host = self.queue.get()
try:
get_info(host)
except Exception as e:
print(e)
continue
def get_position_by_ip(host):
try:
ip_url = "http://ip-api.com/json/{ip}?lang=zh-CN".format(ip=host)
header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"}
json_data = requests.get(url=ip_url, headers=header, timeout=10).json()
info = [json_data.get("country"), json_data.get('regionName'), json_data.get('city'), json_data.get('isp')]
return info
except Exception as e:
print(e)
def get_info(host):
username = "admin"
password = "admin"
telnet_timeout = 15
cmd_timeout = 5
try:
        t = Telnet(host, timeout=telnet_timeout)
        # telnetlib in Python 3 works on bytes, so encode what we send and
        # decode what we read (the original code passed str objects)
        t.read_until(b"username:", cmd_timeout)
        t.write((username + "\n").encode("ascii"))
        t.read_until(b"password:", cmd_timeout)
        t.write((password + "\n").encode("ascii"))
        t.write(b"wlctl show\n")
        t.read_until(b"SSID", cmd_timeout)
        wifi_str = t.read_very_eager().decode("utf-8", "ignore")
        t.write(b"lan show info\n")
        t.read_until(b"MACAddress", cmd_timeout)
        lan_str = t.read_very_eager().decode("utf-8", "ignore")
        t.close()
        if len(wifi_str) > 0:
            # collapse all whitespace so the markers below are easy to find
            wifi_str = "".join(wifi_str.split())
# get SID KEY MAC
wifi_ssid = wifi_str[1:wifi_str.find('QSS')]
            wifi_key = wifi_str[wifi_str.find('Key=') + 4:wifi_str.find('cmd')] if wifi_str.find('Key=') != -1 else 'no password'
router_mac = lan_str[1:lan_str.find('__')].replace('\r\n', '')
current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
try:
my_sqlite_db = Database()
query_info = """select * from scanlog where ssid=? and key=? and mac=?"""
query_result = my_sqlite_db.query(query_info, [wifi_ssid, wifi_key, router_mac])
if len(query_result) < 1:
position_data = get_position_by_ip(host)
country = position_data[0]
province = position_data[1]
city = position_data[2]
isp = position_data[3]
insert_info = """INSERT INTO scanlog (`host`,`mac`,`ssid`,`wifikey`,`country`,`province`,`city`,`isp`) VALUES (?,?,?,?,?,?,?,?)"""
my_sqlite_db.insert(insert_info, [host, router_mac, wifi_ssid, wifi_key, country, province, city, isp])
print('[√] [%s] Info %s %s %s => Inserted!' % (current_time, host, wifi_ssid, wifi_key))
else:
print('[x] [%s] Found %s %s %s in DB, do nothing!' % (current_time, host, wifi_ssid, wifi_key))
except Exception as e:
print(e)
except Exception as e:
pass
if __name__ == '__main__':
print('==========================================')
print(' Scan TPLINK(MERCURY) wifi key by telnet')
print(' Author 92ez.com')
print('==========================================')
begin_ip = sys.argv[2].split('-')[0]
end_ip = sys.argv[2].split('-')[1]
ip_list = ip_range(begin_ip, end_ip)
current_pid = os.getpid()
print('\n[*] Total %d IP...' % len(ip_list))
print('\n================ Running =================')
try:
b_thread(ip_list)
except KeyboardInterrupt:
print('\n[*] Kill all thread.')
os.kill(current_pid, 9)
|
hos7ein/firewalld
|
src/firewall/functions.py
|
Python
|
gpl-2.0
| 17,408
| 0.004193
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007,2008,2011,2012 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = [ "PY2", "getPortID", "getPortRange", "portStr", "getServiceName",
"checkIP", "checkIP6", "checkIPnMask", "checkIP6nMask",
"checkProtocol", "checkInterface", "checkUINT32",
"firewalld_is_active", "tempFile", "readfile", "writefile",
"enable_ip_forwarding", "get_nf_conntrack_helper_setting",
"set_nf_conntrack_helper_setting", "get_nf_conntrack_helpers",
"get_nf_nat_helpers", "check_port", "check_address",
"check_single_address", "check_mac", "uniqify", "ppid_of_pid",
"max_zone_name_len", "checkUser", "checkUid", "checkCommand",
"checkContext", "joinArgs", "splitArgs",
"b2u", "u2b", "u2b_if_py2" ]
import socket
import os
import os.path
import shlex
import pipes
import re
import string
import sys
import tempfile
from firewall.core.logger import log
from firewall.core.prog import runProg
from firewall.config import FIREWALLD_TEMPDIR, FIREWALLD_PIDFILE, COMMANDS
PY2 = sys.version < '3'
def getPortID(port):
""" Check and Get port id from port string or port id using socket.getservbyname
@param port port string or port id
@return Port id if valid, -1 if port can not be found and -2 if port is too big
"""
if isinstance(port, int):
_id = port
else:
if port:
port = port.strip()
try:
_id = int(port)
except ValueError:
try:
_id = socket.getservbyname(port)
except socket.error:
return -1
if _id > 65535:
return -2
return _id
def getPortRange(ports):
""" Get port range for port range string or single port id
@param ports an integer or port string or port range string
    @return A (start, end) tuple for a valid range or a 1-tuple for a single port; -1 if a port can not be found or the range is invalid, -2 if a port id is too big for integer input, and None if the range is ambiguous.
"""
# "<port-id>" case
if isinstance(ports, int) or ports.isdigit():
id1 = getPortID(ports)
if id1 >= 0:
return (id1,)
return id1
splits = ports.split("-")
# "<port-id>-<port-id>" case
if len(splits) == 2 and splits[0].isdigit() and splits[1].isdigit():
id1 = getPortID(splits[0])
id2 = getPortID(splits[1])
if id1 >= 0 and id2 >= 0:
if id1 < id2:
return (id1, id2)
elif id1 > id2:
return (id2, id1)
else: # ids are the same
return (id1,)
# everything else "<port-str>[-<port-str>]"
matched = [ ]
for i in range(len(splits), 0, -1):
id1 = getPortID("-".join(splits[:i]))
port2 = "-".join(splits[i:])
if len(port2) > 0:
id2 = getPortID(port2)
if id1 >= 0 and id2 >= 0:
if id1 < id2:
matched.append((id1, id2))
elif id1 > id2:
matched.append((id2, id1))
else:
matched.append((id1, ))
else:
if id1 >= 0:
matched.append((id1,))
if i == len(splits):
# full match, stop here
break
if len(matched) < 1:
return -1
elif len(matched) > 1:
return None
return matched[0]
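# Illustrative results (added comment, not in the original source):
#   getPortRange(80)      -> (80,)
#   getPortRange("80-88") -> (80, 88)
#   getPortRange("88-80") -> (80, 88)   # bounds are reordered
#   getPortRange("70000") -> -2         # beyond the 16-bit port space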
def portStr(port, delimiter=":"):
""" Create port and port range string
@param port port or port range int or [int, int]
@param delimiter of the output string for port ranges, default ':'
@return Port or port range string, empty string if port isn't specified, None if port or port range is not valid
"""
if port == "":
return ""
_range = getPortRange(port)
if isinstance(_range, int) and _range < 0:
return None
elif len(_range) == 1:
return "%s" % _range
else:
return "%s%s%s" % (_range[0], delimiter, _range[1])
def portInPortRange(port, range):
_port = getPortID(port)
_range = getPortRange(range)
if len(_range) == 1:
return _port == getPortID(_range[0])
if len(_range) == 2 and \
_port >= getPortID(_range[0]) and _port <= getPortID(_range[1]):
return True
return False
def getServiceName(port, proto):
""" Check and Get service name from port and proto string combination using socket.getservbyport
@param port string or id
@param protocol string
@return Service name if port and protocol are valid, else None
"""
try:
name = socket.getservbyport(int(port), proto)
except socket.error:
return None
return name
def checkIP(ip):
""" Check IPv4 address.
@param ip address string
@return True if address is valid, else False
"""
try:
socket.inet_pton(socket.AF_INET, ip)
except socket.error:
return False
return True
def checkIP6(ip):
""" Check IPv6 address.
@param ip address string
@return True if address is valid, else False
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error:
return False
return True
def checkIPnMask(ip):
if "/" in ip:
addr = ip[:ip.index("/")]
mask = ip[ip.index("/")+1:]
if len(addr) < 1 or len(mask) < 1:
return False
else:
addr = ip
mask = None
if not checkIP(addr):
return False
if mask:
if "." in mask:
return checkIP(mask)
else:
try:
i = int(mask)
except ValueError:
return False
if i < 0 or i > 32:
                return False
return True
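# e.g. (added, not in the original source):
#   checkIPnMask('192.168.1.0/24')            -> True
#   checkIPnMask('192.168.1.0/255.255.255.0') -> True   (dotted masks are accepted)
#   checkIPnMask('192.168.1.0/33')            -> False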
def checkIP6nMask(ip):
if "/" in ip:
addr = ip[:ip.index("/")]
mask = ip[ip.index("/")+1:]
if len(addr) < 1 or len(mask) < 1:
return False
else:
addr = ip
mask = None
if not checkIP6(addr):
return False
if mask:
try:
i = int(mask)
except ValueError:
return False
        if i < 0 or i > 128:
            return False
return True
def checkProtocol(protocol):
try:
i = int(protocol)
except ValueError:
# string
try:
socket.getprotobyname(protocol)
except socket.error:
return False
else:
if i < 0 or i > 255:
return False
return True
def checkInterface(iface):
""" Check interface string
@param interface string
    @return True if interface is valid (maximum 16 chars and does not contain ' ', '/', '!', '*'), else False
"""
if not iface or len(iface) > 16:
return False
for ch in [ ' ', '/', '!', '*' ]:
# !:* are limits for iptables <= 1.4.5
if ch in iface:
return False
# disabled old iptables check
#if iface == "+":
# # limit for iptables <= 1.4.5
# return False
return True
def checkUINT32(val):
try:
x = int(val, 0)
except ValueError:
return False
else:
if x >= 0 and x <= 4294967295:
return True
return False
def firewalld_is_active():
""" Check if firewalld is active
@return True if there is a firewalld pid file and the pid is used by firewalld
"""
if not os.path.exists(FIREWALLD_PIDFIL
|
pyfa-org/Pyfa
|
gui/utils/anim_effects.py
|
Python
|
gpl-3.0
| 1,706
| 0
|
import math
def OUT_CIRC(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t = t / d - 1
return c * math.sqrt(1 - t * t) + b
def OUT_QUART(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t = t / d - 1
return -c * (t * t * t * t - 1) + b
def INOUT_CIRC(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
    t1 = t / (d / 2)
    if t1 < 1:
        return -c / 2 * (math.sqrt(1 - t1 ** 2) - 1) + b
else:
return c / 2 * (math.sqrt(1 - (t1 - 2) ** 2) + 1) + b
def IN_CUBIC(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t /= d
    return c * t * t * t + b
def OUT_QUAD(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t /= d
return -c * t * (t - 2) + b
def OUT_BOUNCE(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
    d = float(d)
t /= d
if t < (1 / 2.75):
return c * (7.5625 * t * t) + b
elif t < (2 / 2.75):
t -= (1.5 / 2.75)
return c * (7.5625 * t * t + .75) + b
elif t < (2.5 / 2.75):
t -= (2.25 / 2.75)
return c * (7.5625 * t * t + .9375) + b
else:
t -= (2.625 / 2.75)
return c * (7.5625 * t * t + .984375) + b
def INOUT_EXP(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t1 = t / (d / 2)
if t == 0:
return b
elif t == d:
return b + c
elif t1 < 1:
return c / 2 * math.pow(2, 10 * (t1 - 1)) + b - c * 0.0005
else:
return c / 2 * 1.0005 * (-math.pow(2, -10 * (t1 - 1)) + 2) + b
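# --- Illustrative usage (added, not part of the original module) ---
# Every easing function here shares the signature (t, b, c, d):
#   t = elapsed time, b = start value, c = total change, d = duration.
# Sampling OUT_QUAD across a 4-tick animation from 0 to 100:
#   [OUT_QUAD(t, 0, 100, 4) for t in range(5)]
#   -> [0.0, 43.75, 75.0, 93.75, 100.0]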
|
kiddinn/plaso
|
tests/parsers/esedb_plugins/msie_webcache.py
|
Python
|
apache-2.0
| 3,923
| 0.001784
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the MSIE WebCache ESE database plugin."""
# pylint: disable=protected-access
def testConvertHeadersValues(self):
"""Tests the _ConvertHeadersValues function."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = (
b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
b'X-XSS-Protection: 1; mode=block\r\n'
b'Alternate-Protocol: 80:quic\r\n\r\n')
expected_headers_value = (
'[HTTP/1.1 200 OK; Content-Type: image/png; '
'X-Content-Type-Options: nosniff; Content-Length: 2759; '
'X-XSS-Protection: 1; mode=block; '
'Alternate-Protocol: 80:quic]')
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value)
def testProcessOnDatabaseWithPartitionsTable(self):
"""Tests the Process function on database with a Partitions table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 1354)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    # The order in which ESEDBPlugin._GetRecordValues() generates events is
    # nondeterministic, hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'container_identifier': 1,
'data_type': 'msie:webcache:containers',
'date_time': '2014-05-12 07:30:25.4861987',
'directory': (
            'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
'INetCache\\IE\\'),
'name': 'Content',
'set_identifier': 0,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[567], expected_event_values)
def testProcessOnDatabaseWithPartitionsExTable(self):
"""Tests the Process function on database with a PartitionsEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
    storage_writer = self._ParseESEDBFileWithPlugin(
['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 4014)
self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    # The order in which ESEDBPlugin._GetRecordValues() generates events is
    # nondeterministic, hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'access_count': 5,
'cache_identifier': 0,
'cached_file_size': 726,
'cached_filename': 'b83d57c0[1].svg',
'container_identifier': 14,
'data_type': 'msie:webcache:container',
'date_time': '2019-03-20 17:22:14.0000000',
'entry_identifier': 63,
'sync_count': 0,
'response_headers': (
'[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
'Mon, 16 Dec 2019 20:55:28 GMT]'),
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
jbedorf/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py
|
Python
|
apache-2.0
| 26,060
| 0.004298
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
# TODO(b/72408568): Remove this once session.run can get
# variant tensors.
"""Remove variants from a nest structure, so sess.run will execute."""
def _remove_variant(x):
if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:
return ()
else:
return x
return nest.map_structure(_remove_variant, get_next_op)
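# Illustrative (added comment, not in the original source): given a nest such as
#   {'a': <variant-dtype tensor>, 'b': <int32 tensor>}
# remove_variants returns {'a': (), 'b': <int32 tensor>}, so sess.run() no longer
# has to fetch variant-dtype tensors.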
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
def tearDown(self):
self._delete_ckpt()
# TODO(b/72657739): Remove sparse_tensor argument, which is to test the
# (deprecated) saveable `SparseTensorSliceDataset`, once the API
# `from_sparse_tensor_slices()`and related tests are deleted.
def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):
"""Runs the core tests.
Args:
ds_fn1: 0-argument function that returns a Dataset.
ds_fn2: 0-argument function that returns a Dataset different from
ds_fn1. If None, verify_restore_in_modified_graph test is not run.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
Raises:
AssertionError if any test fails.
"""
# NOTE: We disable all default optimizations in serialization tests in order
# to test the actual dataset in question.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
def ds_fn1_no_opt():
return ds_fn1().with_options(options)
self.verify_unused_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_fully_used_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_exhausted_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_init_before_restore(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_multiple_breaks(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_reset_restored_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_restore_in_empty_graph(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
if ds_fn2:
def ds_fn2_no_opt():
return ds_fn2().with_options(options)
self.verify_restore_in_modified_graph(
ds_fn1_no_opt,
ds_fn2_no_opt,
num_outputs,
sparse_tensors=sparse_tensors)
def verify_unused_iterator(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn, [0],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_fully_used_iterator(self, ds_fn, num_outputs,
sparse_tensors=False):
"""Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if test fails.
"""
self.verify_run_with_breaks(
ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
"""Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
self.gen_outputs(
ds_fn, [],
num_outputs,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
actual = self.gen_outputs(
ds_fn, [],
0,
ckpt_saved=True,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
self.assertEqual(len(actual), 0)
def verify_init_before_restore(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that restoring into an already initialized iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs),
num_outputs,
init_before_restore=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
num_breaks: The number of break points. These are uniformly spread in
[0, num_outputs] both inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_reset_restored_iterator(self,
ds_fn,
num_outputs,
break_point=None,
sparse_
|
wannaphongcom/flappy
|
flappy/display3d/__init__.py
|
Python
|
mit
| 279
| 0.003584
|
from flappy.display3d.vertexbuffer3d import VertexBuffer3D, VertexBuffer3DFormat
from flappy.display3d.indexbuffer3d import IndexBuffer3D
from flappy.display3d.program3d import Program3D
from flappy.display3d.texture import Texture
from flappy.display3d.scene3d import Scene3D
|
kaefik/zadanie-python
|
echoserver.py
|
Python
|
mit
| 1,395
| 0.002867
|
import asyncio
import logging
import concurrent.futures
class EchoServer(object):
"""Echo server class"""
def __init__(self, host, port, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._server = asyncio.start_server(self.handle_connection, host=host, port=port)
def start(self, and_loop=True):
self._server = self._loop.run_until_complete(self._server)
logging.info('Listening established on {0}'.format(self._server.sockets[0].getsockname()))
if and_loop:
            self._loop.run_forever()
def stop(self, and_loop=True):
self._server.close()
if and_loop:
self._loop.close()
@asyncio.coroutine
def handle_connection(self, reader, writer):
peername = writer.get_extra_info('peername')
logging.info('Accepted connection from {}'.format(peername))
while not reader.at_eof():
try:
                data = yield from asyncio.wait_for(reader.readline(), timeout=10.0)
writer.write(data)
except concurrent.futures.TimeoutError:
break
writer.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
server = EchoServer('127.0.0.1', 8899)
try:
server.start()
except KeyboardInterrupt:
pass # Press Ctrl+C to stop
finally:
server.stop()
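# A minimal client sketch for manual testing (hedged: assumes the server above
# is running; `echo_once` is illustrative and not part of this module):
#
#   @asyncio.coroutine
#   def echo_once(message):
#       reader, writer = yield from asyncio.open_connection('127.0.0.1', 8899)
#       writer.write(message + b'\n')
#       reply = yield from reader.readline()
#       writer.close()
#       return reply
#
#   loop = asyncio.get_event_loop()
#   print(loop.run_until_complete(echo_once(b'hello')))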
|
arokem/pyAFQ
|
AFQ/utils/tests/test_parallel.py
|
Python
|
bsd-2-clause
| 1,135
| 0
|
import numpy as np
import numpy.testing as npt
import AFQ.utils.parallel as para
def power_it(num, n=2):
# We define a function of the right form for parallelization
return num ** n
def test_parfor():
my_array = np.arange(100).reshape(10, 10)
i, j = np.random.randint(0, 9, 2)
    my_list = list(my_array.ravel())
for engine in ["joblib", "dask", "serial"]:
for backend in ["threading", "multiprocessing"]:
            npt.assert_equal(para.parfor(power_it,
my_list,
engine=engine,
backend=backend,
out_shape=my_array.shape)[i, j],
power_it(my_array[i, j]))
# If it's not reshaped, the first item should be the item 0, 0:
npt.assert_equal(para.parfor(power_it,
my_list,
engine=engine,
backend=backend)[0],
power_it(my_array[0, 0]))
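# A standalone sketch of the call pattern exercised above (hedged: assumes the
# 'serial' engine, which needs no parallel backend):
#   para.parfor(power_it, [0, 1, 2, 3], engine="serial")  # -> [0, 1, 4, 9]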
|
aerobit/sparky
|
sparky.py
|
Python
|
unlicense
| 5,633
| 0.00213
|
#!/usr/bin/env python3.4
# dotslash for local
from flask import Flask, render_template, request, redirect
from werkzeug.contrib.fixers import ProxyFix
from urllib.request import urlopen, Request
from urllib.parse import urlparse
from omxplayer import OMXPlayer
from youtube_dl import YoutubeDL
from youtube_dl.utils import DownloadError
from livestreamer import Livestreamer, PluginError
import os
import traceback
import re
import json
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
player = None
title = None
last_logged_message = ""
# this regex is to escape terminal color codes.
_ANSI_ESCAPE_REXP = re.compile(r"\x1b[^m]*m")
@app.route('/about/')
def splash():
return render_template('splash.html')
@app.route('/')
def root(): # redirect to remote for now, might change.
return redirect('/remote')
@app.route('/remote/')
def remote():
return render_template('remote.html')
@app.route('/settings/')
def settings():
return render_template('settings.html')
@app.route('/remote/omxplayer/<command>') # sending keys from the remote
def omxplayer_remote(command):
player = get_player()
if player is not None:
getattr(player, command)()
return '', 204
else:
return 'nothing playing', 400
@app.route('/remote/system/<command>')
def system_remote(command):
if command == "reboot":
log('rebooting!')
os.system("sudo reboot")
else:
return 'bad command', 400
return '', 204 # success!
@app.route('/status/')
def status():
player = get_player()
if player is not None:
dictionary = {
'video_loaded': True,
'paused': player.paused,
'now_playing': title
}
else:
dictionary = {'video_loaded': False}
return json.dumps(dictionary)
@app.route('/play', methods=['GET'])
def play_url(): # this only plays http urls for now, torrents soon.
global title
url = request.args.get('url') # grab url from /play?url=*
if not url.startswith('http'): # in case the user forgot it
log('url missing http/wrong protocol')
url = 'http://' + url # let's assume it's http, not https
log('received url %s' % url)
log('requesting headers from %s...' % url)
req = Request(url)
req.get_method = lambda: 'HEAD' # only request headers, no content
response = urlopen(req)
ctype = response.headers['content-type']
ctype_split = ctype.split('/') # split into 2 parts
log('headers received. content type is %s' % ctype)
try:
if ctype_split[0] == 'audio' or ctype_split[0] == 'video':
log('url was raw media file, playing! :)')
title = url # i guess this works? :T
play_omxplayer(url)
elif ctype_split[1] == 'x-bittorrent':
log('loading torrents not implemented.')
# this isn't implemented yet.
elif ctype_split[0] == 'text':
# here we check if it's a livestream, and if so get the RTMP url
log('checking if url is a livestream...')
live = Livestreamer()
try:
if "youtube" in url:
raise RuntimeError("youtube is fucked up w/ streaming, falling back to youtube-dl")
plugin = live.resolve_url(url)
streams = plugin.get_streams()
stream = streams.get("best") # fingers crossed for best quality
stream_url_types = ['rtmp', 'url'] # things that livestreamer can have :D
for stream_type in stream_url_types:
if hasattr(stream, stream_type):
log('url is livestream!')
title = "%s (livestream)" % url
play_omxplayer(getattr(stream, stream_type))
return '', 204
except (PluginError, RuntimeError) as e: # therefore url is not (supported) livestream
pass # continue and let youtube-dl try.
log('loading youtube-dl for further processing')
ydl = YoutubeDL({'outtmpl': '%(id)s%(ext)s', 'restrictfilenames': True})
ydl.add_default_info_extractors()
result = ydl.extract_info(url, download=False)
if 'entries' in result: # if video is a playlist
video = result['entries'][0] # play the 1st video in the playlist
else:
video = result
play_omxplayer(video['url'])
title = video['title']
else:
raise DownloadError('Invalid filetype: not audio, video, or text.')
return '', 204 # success w/ no response!
except (UnicodeDecodeError, DownloadError) as e:
return _ANSI_ESCAPE_REXP.sub('', str(e)), 400 # send error message
@app.route("/log/")
def gen_log():
return get_last_logged_message()
def play_omxplayer(uri):
log('playing %s in omxplayer...' % uri)
global player
if get_player() is not None:
player.stop()
player = OMXPlayer(uri,
args='-b -r --audio_queue=10 --video_queue=40',
                       start_playback=True)
def log(text):
print("[sparky] %s" % text)
global last_logged_message
last_logged_message = text
def get_last_logged_message():
    global last_logged_message
return last_logged_message
def get_player():
    global player, title  # title must be declared global too, or the reset below is a dead local store
if player is not None and player.has_finished():
player = None
title = None
return player
if __name__ == '__main__':
app.run("0.0.0.0", debug=True)
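# Hypothetical remote usage (host name is illustrative; Flask's built-in
# server defaults to port 5000):
#   curl 'http://sparky.local:5000/play?url=https://vimeo.com/123'
#   curl 'http://sparky.local:5000/status/'   # returns the JSON built in status()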
|
twisted/quotient
|
xquotient/test/historic/test_mta2to3.py
|
Python
|
mit
| 512
| 0.001953
|
from axiom.test.historic.stubloader import StubbedTest
from xquotient.mail import MailTransferAgent
from axiom.userbase import LoginSystem
class MTAUpgraderTest(StubbedTest):
def testMTA2to3(self):
"""
Make sure MailTransferAgent upgraded OK and that its
"userbase" attribute refers to the store's userbase.
"""
        mta = self.store.findUnique(MailTransferAgent)
self.assertIdentical(mta.userbase,
self.store.findUnique(LoginSystem))
|
matrixise/gateway
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,914
| 0.00718
|
# -*- coding: utf-8 -*-
#
# Gateway documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 25 06:46:30 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('.'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gateway'
copyright = u'2012, Stephane Wirtel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import pkg_resources
try:
release = pkg_resources.get_distribution('gateway').version
except pkg_resources.DistributionNotFound:
print 'To build the documentation, The distribution information of Gateway'
print 'Has to be available. Either install the package into your'
print 'development environment or run "setup.py develop" to setup the'
print 'metadata. A virtualenv is recommended!'
sys.exit(1)
del pkg_resources
if 'dev' in release:
release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gatewaydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'fontpkg' : r'\usepackage{mathpazo}',
'papersize' : 'a4paper',
'pointsize' : '12pt',
'preamble' : r' \usepackage{flaskstyle}',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Gateway.tex', u'Gateway Documentation',
u'Stephane Wirtel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
latex_use_modindex = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
latex_additional_files = [
'flaskstyle.sty',
]
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gateway', u'Gateway Documentation',
[u'Stephane Wirtel'], 1)
]
# If true, show URL addresses after externa
|
prheenan/BioModel
|
BellZhurkov/Python/TestExamples/Examples/Bell_Examples.py
|
Python
|
gpl-2.0
| 3,266
| 0.013778
|
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../")
import BellZhurkov.Python.TestExamples.TestUtil.Bell_Test_Data as Data
import BellZhurkov.Python.Code.BellZhurkov as BellModel
def RunWoodsideFigure6():
"""
Reproduces Figure 6 From:
Woodside, Michael T., and Steven M. Block.
"Reconstructing Folding Energy Landscapes by Single-Molecule Force Spectroscopy"
Annual Review of Biophysics 43, no. 1 (2014): 19-39.
doi:10.1146/annurev-biophys-051013-022754.
See TestExamples.TestUtil.Bell_Test_Data.Woodside2014FoldingAndUnfoldingData
"""
BellData = Data.Woodside2014FoldingAndUnfoldingData()
Forces,Folding,Unfolding = (BellData.Forces,BellData.RatesFold,
BellData.RatesUnfold)
# everything in SI initially
vary = dict(beta=False,
k0=False,
DeltaG=True,
DeltaX=True)
GuessDict = dict(beta=1/(4.1e-21),
k0=1,
DeltaX=20e-9,
DeltaG=0)
opt = dict(Values=GuessDict,
Vary=vary)
infFold = BellModel.BellZurkovFit(Forces,Folding,**opt)
infUnfold = BellModel.BellZurkovFit(Forces,Unfolding,**opt)
# get predictions along a (slightly larger) x range
xMin=11e-12
xMax=15e-12
# how much should we interpolate?
numPredict = (len(Forces)+1)*50
xRangePredict = np.linspace(xMin,xMax,numPredict)
predictFold = infFold.Predict(xRangePredict)
predictUnfold = infUnfold.Predict(xRangePredict)
markerDict = dict(marker='o',
markersize=7,
linewidth=0,
markeredgewidth=0.0)
lineDict = dict(linestyle='-',color='k',linewidth=1.5)
toPn = 1e12
ForcePn = Forces*toPn
fig = plt.figure()
ax = plt.subplot(1,1,1)
plt.plot(ForcePn,Folding,'ro',label="Folding",**markerDict)
plt.plot(xRangePredict*toPn,predictFold,**lineDict)
plt.plot(ForcePn,Unfolding,'bo',label="Unfolding",**markerDict)
plt.plot(xRangePredict*toPn,predictUnfold,**lineDict)
ax.set_yscale('log')
# limits in PicoNewtons
plt.xlim(xMin*toPn,xMax*toPn)
plt.xlabel("Force (pN)")
plt.ylabel("Rate (Hz)")
plt.title("Woodside and Block, Figure 6a (2016)")
plt.legend(loc='lower center')
fig.savefig("./Woodside2016_Figure6.png")
def RunSchlierf2006Figure1a():
DataToTest = Data.Schlierf2006Figure1a()
Forces,Folding = (DataToTest.Forces,DataToTest.RatesFold)
# everything in SI initially
vary = dict(beta=False,
k0=True,
DeltaG=False,
DeltaX=True)
GuessDict = dict(beta=1/(4.1e-21),
k0=0.35,
DeltaX=5e-10,
DeltaG=0)
opt = dict(Values=GuessDict,
Vary=vary)
infFold = BellModel.BellZurkovFit(Forces,Folding,**opt)
def run():
"""
Runs examples of the Bell-Zhurkov Model
"""
RunSchlierf2006Figure1a()
RunWoodsideFigure6()
if __name__ == "__main__":
run()
|
ahmadiga/min_edx
|
lms/djangoapps/survey/models.py
|
Python
|
agpl-3.0
| 8,631
| 0.001738
|
"""
Models to support Course Surveys feature
"""
import logging
from lxml import etree
from collections import OrderedDict
from django.db import models
from student.models import User
from django.core.exceptions import ValidationError
from model_utils.models import TimeStampedModel
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.survey")
class SurveyForm(TimeStampedModel):
"""
Model to define a Survey Form that contains the HTML form data
that is presented to the end user. A SurveyForm is not tied to
a particular run of a course, to allow for sharing of Surveys
across courses
"""
name = models.CharField(max_length=255, db_index=True, unique=True)
form = models.TextField()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
"""
Override save method so we can validate that the form HTML is
actually parseable
"""
self.validate_form_html(self.form)
# now call the actual save method
super(SurveyForm, self).save(*args, **kwargs)
@classmethod
def validate_form_html(cls, html):
"""
Makes sure that the html that is contained in the form field is valid
"""
try:
fields = cls.get_field_names_from_html(html)
except Exception as ex:
log.exception("Cannot parse SurveyForm html: {}".format(ex))
raise ValidationError("Cannot parse SurveyForm as HTML: {}".format(ex))
if not len(fields):
raise ValidationError("SurveyForms must contain at least one form input field")
@classmethod
def create(cls, name, form, update_if_exists=False):
"""
Helper class method to create a new Survey Form.
update_if_exists=True means that if a form already exists with that name, then update it.
Otherwise throw an SurveyFormAlreadyExists exception
"""
survey = cls.get(name, throw_if_not_found=False)
if not survey:
survey = SurveyForm(name=name, form=form)
else:
if update_if_exists:
survey.form = form
else:
raise SurveyFormNameAlreadyExists()
survey.save()
return survey
@classmethod
def get(cls, name, throw_if_not_found=True):
"""
Helper class method to look up a Survey Form, throw FormItemNotFound if it does not exists
in the database, unless throw_if_not_found=False then we return None
"""
survey = None
exists = SurveyForm.objects.filter(name=name).exists()
if exists:
survey = SurveyForm.objects.get(name=name)
elif throw_if_not_found:
raise SurveyFormNotFound()
return survey
def get_answers(self, user=None, limit_num_users=10000):
"""
Returns all answers for all users for this Survey
"""
return SurveyAnswer.get_answers(self, user, limit_num_users=limit_num_users)
def has_user_answered_survey(self, user):
"""
Returns whether a given user has supplied answers to this
survey
"""
return SurveyAnswer.do_survey_answers_exist(self, user)
def save_user_answers(self, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
# first remove any answer the user might have done before
self.clear_user_answers(user)
SurveyAnswer.save_answers(self, user, answers, course_key)
def clear_user_answers(self, user):
"""
Removes all answers that a user has submitted
"""
SurveyAnswer.objects.filter(form=self, user=user).delete()
def get_field_names(self):
"""
Returns a list of defined field names for all answers in a survey. This can be
helpful for reporting like features, i.e. adding headers to the reports
This is taken from the set of <input> fields inside the form.
"""
return SurveyForm.get_field_names_from_html(self.form)
@classmethod
def get_field_names_from_html(cls, html):
"""
Returns a list of defined field names from a block of HTML
"""
names = []
        # make sure the form is wrapped in some outer single element
# otherwise lxml can't parse it
# NOTE: This wrapping doesn't change the ability to query it
tree = etree.fromstring(u'<div>{}</div>'.format(html))
input_fields = (
tree.findall('.//input') + tree.findall('.//select') +
tree.findall('.//textarea')
)
for input_field in input_fields:
if 'name' in input_field.keys() and input_field.attrib['name'] not in names:
names.append(input_field.attrib['name'])
return names
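# Illustration of the parsing above (hypothetical markup):
#   SurveyForm.get_field_names_from_html(
#       u'<input name="q1"/><select name="q2"></select>')
#   # -> ['q1', 'q2']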
class SurveyAnswer(TimeStampedModel):
"""
Model for the answers that a user gives for a particular form in a course
"""
user = models.ForeignKey(User, db_index=True)
form = models.ForeignKey(SurveyForm, db_index=True)
field_name = models.CharField(max_length=255, db_index=True)
field_value = models.CharField(max_length=1024)
# adding the course_id where the end-user answered the survey question
# since it didn't exist in the beginning, it is nullable
course_key = CourseKeyField(max_length=255, db_index=True, null=True)
@classmethod
def do_survey_answers_exist(cls, form, user):
"""
Returns whether a user has any answers for a given SurveyForm for a course
This can be used to determine if a user has taken a CourseSurvey.
"""
return SurveyAnswer.objects.filter(form=form, user=user).exists()
@classmethod
def get_answers(cls, form, user=None, limit_num_users=10000):
"""
Returns all answers a user (or all users, when user=None) has given to an instance of a SurveyForm
Return is a nested dict which are simple name/value pairs with an outer key which is the
user id. For example (where 'field3' is an optional field):
results = {
'1': {
'field1': 'value1',
'field2': 'value2',
},
'2': {
'field1': 'value3',
'field2': 'value4',
'field3': 'value5',
}
:
:
}
limit_num_users is to prevent an unintentional huge, in-memory dictionary.
"""
if user:
answers = SurveyAnswer.objects.filter(form=form, user=user)
else:
answers = SurveyAnswer.objects.filter(form=form)
results = OrderedDict()
num_users = 0
for answer in answers:
user_id = answer.user.id
            if user_id not in results and num_users < limit_num_users:
                results[user_id] = OrderedDict()
num_users = num_users + 1
if user_id in results:
results[user_id][answer.field_name] = answer.field_value
return results
@classmethod
def save_answers(cls, form, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
for name in answers.keys():
value = answers[name]
# See if there is an answer stored for this user, form, field_name pair or not
# this will allow for update cases. This does include an additional lookup,
# but write operations will be relatively infrequent
defaults = {"field_value": value}
if course
|
bdarnell/tornado_http2
|
setup.py
|
Python
|
apache-2.0
| 702
| 0
|
import sys
try:
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
version = '0.0.1'
kwargs = {}
if setuptools is not None:
kwargs['install_requires'] = ['tornado>=4.3']
if sys.version_info < (3, 4):
kwargs['install_requires'].append('enum34')
setup(
name='tornado_http2',
version=version,
packages=['tornado_http2', 'tornado_http2.test'],
package_data={
'tornado_http2': [
'hpack_static_table.txt',
'hpack_huffman_data.txt',
],
'tornado_http2.test': [
'test.crt',
'test.key',
],
},
**kwargs)
|
akheron/stango
|
tests/test_generate.py
|
Python
|
mit
| 6,958
| 0.000862
|
import io
import os
import unittest
from stango import Stango
from stango.files import Files
from . import StangoTestCase, make_suite, view_value, view_template
dummy_view = view_value('')
class GenerateTestCase(StangoTestCase):
def setup(self):
self.tmp = self.tempdir()
self.manager = Stango()
self.manager.index_file = 'index.html'
def test_generate_simple(self):
self.manager.files += [
('', view_value('foobar')),
('barfile.txt', view_value('barfoo')),
]
self.manager.generate(self.tmp)
self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'index.html'])
with open(os.path.join(self.tmp, 'index.html')) as fobj:
self.eq(fobj.read(), 'foobar')
with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
self.eq(fobj.read(), 'barfoo')
def test_generate_dest_is_non_dir(self):
self.manager.files = Files(
('', dummy_view),
)
dest_path = os.path.join(self.tmp, 'dest.txt')
with open(dest_path, 'w') as fobj:
fobj.write('foo')
exc = self.assert_raises(ValueError, self.manager.generate, dest_path)
self.eq(str(exc), "'%s' is not a directory" % dest_path)
# Check the file wasn't modified
self.eq(os.listdir(self.tmp), ['dest.txt'])
with open(os.path.join(self.tmp, 'dest.txt'), 'r') as fobj:
self.eq(fobj.read(), 'foo')
def test_generate_outdir_exists(self):
# Create a file and a directory to outdir
with open(os.path.join(self.tmp, 'foo'), 'w') as fobj:
fobj.write('bar')
os.mkdir(os.path.join(self.tmp, 'dummydir'))
self.eq(sorted(os.listdir(self.tmp)), ['dummydir', 'foo'])
self.manager.files = Files(
('', view_value('baz')),
)
self.manager.generate(self.tmp)
# Check that the old destdir contents were removed
self.eq(os.listdir(self.tmp), ['index.html'])
def test_generate_different_index_file(self):
self.manager.index_file = 'foofile.txt'
self.manager.files += [
('', view_value('foobar')),
('barfile.txt', view_value('barfoo')),
]
self.manager.generate(self.tmp)
self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'foofile.txt'])
with open(os.path.join(self.tmp, 'foofile.txt')) as fobj:
self.eq(fobj.read(), 'foobar')
        with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
self.eq(fobj.read(), 'barfoo')
def test_view_returns_a_bytes_object(self):
self.manager.files = Files(
('', view_value(b'\xde\xad\xbe\xef')),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
self.eq(fobj.read(), b'\xde\xad\xbe\xef')
    def test_view_returns_a_bytearray_object(self):
self.manager.files = Files(
('', view_value(bytearray(b'\xba\xdc\x0f\xfe'))),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
self.eq(fobj.read(), b'\xba\xdc\x0f\xfe')
def test_view_returns_a_filelike_object_with_str_contents(self):
self.manager.files = Files(
('', view_value(io.StringIO('foobar'))),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
self.eq(fobj.read(), 'foobar')
def test_view_returns_a_filelike_object_with_bytes_contents(self):
self.manager.files = Files(
('', view_value(io.BytesIO(b'barfoo'))),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
self.eq(fobj.read(), 'barfoo')
def test_view_renders_a_template(self):
self.manager.template_dirs.insert(0, self.template_path)
self.manager.files = Files(
('', view_template('value.txt'), {'value': 'foobar'})
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html')) as fobj:
self.eq(fobj.read(), 'value is: foobar')
def test_no_index_file(self):
self.manager.index_file = None
self.manager.files = Files(
('quux/', dummy_view),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "Directory path and no index_file: 'quux/'")
def test_view_returns_None(self):
self.manager.files = Files(
('', view_value(None)),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "The result of view 'value_returner' for path '' is not a str, bytes or bytearray instance or a file-like object")
def test_view_returns_an_integer(self):
self.manager.files = Files(
('foo.txt', view_value(1)),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "The result of view 'value_returner' for path 'foo.txt' is not a str, bytes or bytearray instance or a file-like object")
def test_view_returns_a_filelike_object_with_invalid_contents(self):
class InvalidFile(object):
def read(self):
return 42
self.manager.files = Files(
('', view_value(InvalidFile())),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "Contents of the file-like object, returned by view 'value_returner' for path '', is not a str, bytes or bytearray instance")
def test_post_render_hook(self):
def post_render_hook(context, data):
return data + b' hurr durr'
self.manager.add_hook('post_render_hook', post_render_hook)
self.manager.files = Files(
('', view_value('foobar')),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
self.eq(fobj.read(), b'foobar hurr durr')
def test_post_render_hook_returns_None(self):
self.manager.add_hook('post_render_hook', lambda x, y: None)
self.manager.files = Files(
('', view_value('foobar')),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), 'The result of post_render_hook is not a bytes or bytearray instance for index.html')
def suite():
return make_suite(GenerateTestCase)
|
kerimlcr/ab2017-dpyo
|
ornek/moviepy/moviepy-0.2.2.12/moviepy/video/fx/__init__.py
|
Python
|
gpl-3.0
| 121
| 0
|
"""
This module contains transformation functions (clip->clip)
One file for one fx. The file's name is the fx's name
"""
|
steeve/libtorrent
|
set_version.py
|
Python
|
bsd-3-clause
| 1,975
| 0.021772
|
#! /usr/bin/env python
import os
import sys
import glob
version = (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
def substitute_file(name):
subst = ''
f = open(name)
for l in f:
if '#define LIBTORRENT_VERSION_MAJOR' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_MAJOR %d\n' % version[0]
elif '#define LIBTORRENT_VERSION_MINOR' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_MINOR %d\n' % version[1]
elif '#define LIBTORRENT_VERSION_TINY' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_TINY %d\n' % version[2]
elif '#define LIBTORRENT_VERSION ' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION "%d.%d.%d.%d"\n' % (version[0], version[1], version[2], version[3])
elif 'AC_INIT([libtorrent-rasterbar]' in l and name.endswith('.ac'):
l = 'AC_INIT([libtorrent-rasterbar],[%d.%d.%d],[arvid@libtorrent.org],\n' % (version[0], version[1], version[2])
elif 'set (VERSION ' in l and name.endswith('.txt'):
l = 'set (VERSION "%d.%d.%d")\n' % (version[0], version[1], version[2])
elif ':Version: ' in l and (name.endswith('.rst') or name.endswith('.py')):
l = ':Version: %d.%d.%d\n' % (version[0], version[1], version[2])
elif 'VERSION = ' in l and name.endswith('Jamfile'):
l = 'VERSION = %d.%d.%d ;\n' % (version[0], version[1], version[2])
elif 'version=' in l and name.endswith('setup.py'):
l = "\tversion = '%d.%d.%d',\n" % (version[0], version[1], version[2])
elif "version = '" in l and name.endswith('setup.py'):
l = "\tversion = '%d.%
|
d.%d',\n" % (version[0], version[1], version[2])
subst += l
f.close()
open(name, 'w+').write(subst)
substitute_file('include/libtorrent/version.hpp')
substitute_file('CMakeLists.txt')
substitute_file('configure.ac')
substitute_file('bindings/python/setup.py')
substitute_file('docs/gen_reference_doc.py')
for i in glob.glob('docs/*.rst'):
substitute_file(i)
substitute_file('Jamfile')
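# Hypothetical invocation, run from the repository root with the four version
# components as positional arguments:
#   python set_version.py 1 0 9 0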
|
ctag/cpe453
|
JMRI/jython/Jynstruments/ThrottleWindowToolBar/USBThrottle.jyn/LogitechCordlessRumblePad2.py
|
Python
|
gpl-2.0
| 5,884
| 0.013936
|
print "Loading USBDriver : Logitech Cordless RumblePad 2"
class USBDriver :
def __init__(self):
self.componentNextThrottleFrame = "Hat Switch" # Component for throttle frames browsing
self.valueNextThrottleFrame = 0.5
self.componentPreviousThrottleFrame = "Hat Switch"
self.valuePreviousThrottleFrame = 1
self.componentNextRunningThrottleFrame = "" # Component for running throttle frames browsing
self.valueNextRunningThrottleFrame = 0.75
self.componentPreviousRunningThrottleFrame = ""
self.valuePreviousRunningThrottleFrame = 0.25
        # From there available only when no throttle is active in current window
self.componentNextRosterBrowse = "Hat Switch" # Component for roster browsing
self.valueNextRoster = 0.75
self.componentPreviousRosterBrowse = "Hat Switch"
self.valuePreviousRoster = 0.25
self.componentRosterSelect = "Button 4" # Component to select a roster
self.valueRosterSelect = 1
# From there available only when a throttle is active in current window
self.componentThrottleRelease = "Button 5" # Component to release current throttle
self.valueThrottleRelease = 1
self.componentSpeed = "X Axis" # Analog axis component for curent throttle speed
self.valueSpeedTrigger = 0.05 # ignore values lower than
self.componentSpeedMultiplier = .5 # multiplier for pad value (negative values to reveerse)
self.componentSpeedIncrease = ""
self.valueSpeedIncrease = 1
self.componentSpeedDecrease = ""
self.valueSpeedDecrease = 1
        self.componentDirectionForward = "Z Rotation" # Analog axis component for current throttle direction
self.valueDirectionForward = -1
self.componentDirectionBackward = "Z Rotation"
self.valueDirectionBackward = 1
self.componentStopSpeed = "Button 7" # Preset speed button stop, double tap will Estop
self.valueStopSpeed = 1
self.componentSlowSpeed = "" # Preset speed button slow
self.valueSlowSpeed = 1
self.componentCruiseSpeed = "" # Preset speed button cruise, double tap will max speed
self.valueCruiseSpeed = 1
self.componentMaxSpeed = "" # Preset speed button max
self.valueMaxSpeed = 1
self.componentF0 = "Button 0" # Function button
self.valueF0 = 1
self.valueF0Off = 0 # off event for non lockable functions
self.componentF1 = "Button 1" # Function button
self.valueF1 = 1
self.valueF1Off = 0
self.componentF2 = "Button 2" # Function button
self.valueF2 = 1
self.valueF2Off = 0
self.componentF3 = "Button 3" # Function button
self.valueF3 = 1
self.valueF3Off = 0
self.componentF4 = "" # Function button
self.valueF4 = 1
self.valueF4Off = 0
self.componentF5 = "" # Function button
self.valueF5 = 1
self.valueF5Off = 0
self.componentF6 = "" # Function button
self.valueF6 = 1
self.valueF6Off = 0
self.componentF7 = "" # Function button
self.valueF7 = 1
self.valueF7Off = 0
self.componentF8 = "" # Function button
self.valueF8 = 1
self.valueF8Off = 0
self.componentF9 = "" # Function button
self.valueF9 = 1
self.valueF9Off = 0
self.componentF10 = "" # Function button
self.valueF10 = 1
self.valueF10Off = 0
self.componentF11 = "" # Function button
self.valueF11 = 1
self.valueF11Off = 0
self.componentF12 = "" # Function button
self.valueF12 = 1
self.valueF12Off = 0
self.componentF13 = "" # Function button
self.valueF13 = 1
self.valueF13Off = 0
self.componentF14 = "" # Function button
self.valueF14 = 1
self.valueF14Off = 0
self.componentF15 = "" # Function button
self.valueF15 = 1
self.valueF15Off = 0
self.componentF16 = "" # Function button
self.valueF16 = 1
self.valueF16Off = 0
self.componentF17 = "" # Function button
self.valueF17 = 1
self.valueF17Off = 0
self.componentF18 = "" # Function button
self.valueF18 = 1
self.valueF18Off = 0
self.componentF19 = "" # Function button
self.valueF19 = 1
self.valueF19Off = 0
self.componentF20 = "" # Function button
self.valueF20 = 1
self.valueF20Off = 0
self.componentF21 = "" # Function button
self.valueF21 = 1
self.valueF21Off = 0
self.componentF22 = "" # Function button
self.valueF22 = 1
self.valueF22Off = 0
self.componentF23 = "" # Function button
self.valueF23 = 1
self.valueF23Off = 0
self.componentF24 = "" # Function button
self.valueF24 = 1
self.valueF24Off = 0
self.componentF25 = "" # Function button
self.valueF25 = 1
self.valueF25Off = 0
self.componentF26 = "" # Function button
self.valueF26 = 1
self.valueF26Off = 0
self.componentF27 = "" # Function button
self.valueF27 = 1
self.valueF27Off = 0
self.componentF28 = "" # Function button
self.valueF28 = 1
self.valueF28Off = 0
self.componentF29 = "" # Function button
self.valueF29 = 1
self.valueF29Off = 0
|
paulsmith/geodjango
|
django/contrib/gis/db/backend/postgis/creation.py
|
Python
|
bsd-3-clause
| 9,184
| 0.005989
|
from django.conf import settings
from django.core.management import call_command
from django.db import connection
from django.test.utils import _set_autocommit, TEST_DATABASE_PREFIX
import os, re, sys
def getstatusoutput(cmd):
"A simpler version of getstatusoutput that works on win32 platforms."
stdin, stdout, stderr = os.popen3(cmd)
output = stdout.read()
if output.endswith('\n'): output = output[:-1]
status = stdin.close()
return status, output
def create_lang(db_name, verbosity=1):
"Sets up the pl/pgsql language on the given database."
# Getting the command-line options for the shell command
options = get_cmd_options(db_name)
# Constructing the 'createlang' command.
createlang_cmd = 'createlang %splpgsql' % options
if verbosity >= 1: print createlang_cmd
# Must have database super-user privileges to execute createlang -- it must
# also be in your path.
status, output = getstatusoutput(createlang_cmd)
# Checking the status of the command, 0 => execution successful
if status:
raise Exception("Error executing 'plpgsql' command: %s\n" % output)
def _create_with_cursor(db_name, verbosity=1, autoclobber=False):
"Creates database with psycopg2 cursor."
# Constructing the necessary SQL to create the database (the DATABASE_USER
# must possess the privileges to create a database)
create_sql = 'CREATE DATABASE %s' % connection.ops.quote_name(db_name)
if settings.DATABASE_USER:
create_sql += ' OWNER %s' % settings.DATABASE_USER
cursor = connection.cursor()
_set_autocommit(connection)
try:
# Trying to create the database first.
cursor.execute(create_sql)
#print create_sql
except Exception, e:
# Drop and recreate, if necessary.
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_db(db_name)
if verbosity >= 1: print 'Creating new spatial database...'
cursor.execute(create_sql)
else:
raise Exception('Spatial Database Creation canceled.')
foo = _create_with_cursor
created_regex = re.compile(r'^createdb: database creation failed: ERROR: database ".+" already exists')
def _create_with_shell(db_name, verbosity=1, autoclobber=False):
"""
If no spatial database already exists, then using a cursor will not work.
Thus, a `createdb` command will be issued through the shell to bootstrap
creation of the spatial database.
"""
# Getting the command-line options for the shell command
options = get_cmd_options(False)
create_cmd = 'createdb -O %s %s%s' % (settings.DATABASE_USER, options, db_name)
if verbosity >= 1: print create_cmd
# Attempting to create the database.
status, output = getstatusoutput(create_cmd)
if status:
if created_regex.match(output):
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_cmd = 'dropdb %s%s' % (options, db_name)
status, output = getstatusoutput(drop_cmd)
if status != 0:
raise Exception('Could not drop database %s: %s' % (db_name, output))
if verbosity >= 1: print 'Creating new spatial database...'
status, output = getstatusoutput(create_cmd)
if status != 0:
raise Exception('Could not create database after dropping: %s' % output)
else:
raise Exception('Spatial Database Creation canceled.')
else:
raise Exception('Unknown error occurred in creating database: %s' % output)
def create_spatial_db(test=False, verbosity=1, autoclobber=False, interactive=False):
"Creates a spatial database based on the settings."
# Making sure we're using PostgreSQL and psycopg2
if settings.DATABASE_ENGINE != 'postgresql_psycopg2':
        raise Exception('Spatial database creation is only supported on the postgresql_psycopg2 platform.')
# Getting the spatial database name
if test:
db_name = get_spatial_db(test=True)
_create_with_cursor(db_name, verbosity=verbosity, autoclobber=autoclobber)
else:
db_name = get_spatial_db()
_create_with_shell(db_name, verbosity=verbosity, autoclobber=autoclobber)
# Creating the db language, does not need to be done on NT platforms
# since the PostGIS installer enables this capability.
if os.name != 'nt':
create_lang(db_name, verbosity=verbosity)
# Now adding in the PostGIS routines.
load_postgis_sql(db_name, verbosity=verbosity)
if verbosity >= 1: print 'Creation of spatial database %s successful.' % db_name
# Closing the connection
connection.close()
settings.DATABASE_NAME = db_name
# Syncing the database
call_command('syncdb', verbosity=verbosity, interactive=interactive)
def drop_db(db_name=False, test=False):
"""
Drops the given database (defaults to what is returned from
get_spatial_db()). All exceptions are propagated up to the caller.
"""
if not db_name: db_name = get_spatial_db(test=test)
cursor = connection.cursor()
cursor.execute('DROP DATABASE %s' % connection.ops.quote_name(db_name))
def get_cmd_options(db_name):
"Obtains the command-line PostgreSQL connection options for shell commands."
# The db_name parameter is optional
options = ''
if db_name:
options += '-d %s ' % db_name
if settings.DATABASE_USER:
options += '-U %s ' % settings.DATABASE_USER
if settings.DATABASE_HOST:
options += '-h %s ' % settings.DATABASE_HOST
if settings.DATABASE_PORT:
options += '-p %s ' % settings.DATABASE_PORT
return options
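# Illustration (hypothetical settings): with DATABASE_USER='geo' and
# DATABASE_HOST='localhost', get_cmd_options('gis') returns
# '-d gis -U geo -h localhost ', including the trailing space before the
# next shell token.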
def get_spatial_db(test=False):
"""
Returns the name of the spatial database. The 'test' keyword may be set
to return the test spatial database name.
"""
if test:
if settings.TEST_DATABASE_NAME:
            test_db_name = settings.TEST_DATABASE_NAME
else:
test_db_name = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
return test_db_name
else:
if not settings.DATABASE_NAME:
raise Exception('must configure DATABASE_NAME in settings.py')
return settings.DATABASE_NAME
def load_postgis_sql(db_name, verbosity=1):
"""
This routine loads up the PostGIS SQL files lwpostgis.sql and
spatial_ref_sys.sql.
"""
# Getting the path to the PostGIS SQL
try:
# POSTGIS_SQL_PATH may be placed in settings to tell GeoDjango where the
# PostGIS SQL files are located. This is especially useful on Win32
# platforms since the output of pg_config looks like "C:/PROGRA~1/..".
sql_path = settings.POSTGIS_SQL_PATH
except AttributeError:
status, sql_path = getstatusoutput('pg_config --sharedir')
if status:
sql_path = '/usr/local/share'
# The PostGIS SQL post-creation files.
lwpostgis_file = os.path.join(sql_path, 'lwpostgis.sql')
srefsys_file = os.path.join(sql_path, 'spatial_ref_sys.sql')
if not os.path.isfile(lwpostgis_file):
raise Exception('Could not find PostGIS function definitions in %s' % lwpostgis_file)
if not os.path.isfile(srefsys_file):
raise Exception('Could not find PostGIS spatial reference system definitions in %s' % srefsys_file)
# Getting the psql command-line options, and command format.
options = get_cmd_options(db_name)
cmd_fmt = 'psql %s-f "%%s"' % options
# Now trying to load up the PostGIS functions
cmd = cmd_fmt % lwpostgis_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if
|
tp7/assfc
|
tests/font_parsing_tests.py
|
Python
|
mit
| 5,846
| 0.004961
|
import logging
import unittest
from functools import reduce
from ass_parser import StyleInfo, UsageData
from font_loader import TTFFont, FontInfo, FontLoader, TTCFont, FontWeight
from tests.common import get_file_in_test_directory
class FontLoaderTests(unittest.TestCase):
def test_returns_all_not_found_fonts(self):
loader = FontLoader(None, True)
data = {StyleInfo('Jorvik', 0, False) : UsageData(), StyleInfo('Random font', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(2, len(not_found))
def test_returns_all_found_fonts(self):
loader = FontLoader([get_file_in_test_directory('')], True)
data = {StyleInfo('Jorvik Informal V2', 0, False) : UsageData(), StyleInfo('Random font', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
self.assertIn('Jorvik Informal V2', list(found.values())[0].names)
def test_performs_case_insensitive_search(self):
loader = FontLoader([get_file_in_test_directory('')], True)
data = {StyleInfo('JoRvIk INFormAl v2', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
def test_does_not_add_same_font_twice(self):
loader = FontLoader([get_file_in_test_directory(''), get_file_in_test_directory('')], True)
data = {StyleInfo('Jorvik', 0, False) : UsageData(), StyleInfo('Jorvik informal', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
def test_loads_at_least_some_system_fonts(self):
loader = FontLoader(None, True)
self.assertTrue(len(loader.fonts) > 0)
def test_finds_all_required_fonts(self):
loader = FontLoader(None, True)
loader.fonts.append(FontInfo(['Arial'], False, False, FontWeight.FW_NORMAL, 'random', '1'))
loader.fonts.append(FontInfo(['Arial Black'], False, False, FontWeight.FW_NORMAL, 'random', '2'))
data = {StyleInfo('Arial', 0, False) : UsageData(), StyleInfo('Arial Black', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(2, len(found))
def test_returns_only_appropriate_font(self):
loader = FontLoader(None, True)
loader.fonts.append(FontInfo(['Arial'], False, False, FontWeight.FW_NORMAL, 'random', '1'))
loader.fonts.append(FontInfo(['Arial Black'], False, False, FontWeight.FW_NORMAL, 'random', '2'))
data = {StyleInfo('Arial', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
class TTFFontTests(unittest.TestCase):
def test_ttf_name_matches(self):
font = TTFFont(get_file_in_test_directory('seriously.ttf'))
self.assertIn('Seriously', font.get_info().names)
def test_otf_name_matches(self):
font = TTFFont(get_file_in_test_directory('otfpoc.otf'))
self.assertIn('otfpoc', font.get_info().names)
def test_jorvik_v2_name_matches(self):
font = TTFFont(get_file_in_test_directory('Jorvik.ttf'))
self.assertIn('Jorvik Informal V2', font.get_info().names)
def test_detects_italic_only_font(self):
font = TTFFont(get_file_in_test_directory('CaviarDreams_Italic.ttf'))
self.assertIs(font.get_info().italic, True)
def test_detects_bold_only_font(self):
font = TTFFont(get_file_in_test_directory('Caviar Dreams Bold.ttf'))
self.assertIs(font.get_info().bold, True)
def test_detects_italic_bold_font(self):
font = TTFFont(get_file_in_test_directory('CaviarDreams_BoldItalic.ttf'))
self.assertIs(font.get_info().italic, True)
self.assertIs(font.get_info().bold, True)
def test_parses_fonts_with_platform_id_2_strings(self):
        font = TTFFont(get_file_in_test_directory('VANTATHI.TTF'))
self.assertIn('Vanta Thin', font.get_info().names)
def test_parses_fonts_with_utf8_platform_id_0_strings(self):
font = TTFFont(get_file_in_test_directory('SUSANNA_.otf'))
self.assertIn('Susanna', font.get_info().names)
def test_detects_bold_weight(self):
font = TTFFont(get_file_in_test_directory('Caviar Dreams Bold.ttf'))
self.assertEqual(font.get_info().weight, FontWeight.FW_BOLD)
def test_detects_regular_weight(self):
font = TTFFont(get_file_in_test_directory('Jorvik.ttf'))
self.assertEqual(font.get_info().weight, FontWeight.FW_NORMAL)
def test_detects_medium_weight(self):
font = TTFFont(get_file_in_test_directory('seriously.ttf'))
self.assertEqual(font.get_info().weight, FontWeight.FW_MEDIUM)
class TTCFontTests(unittest.TestCase):
def test_contains_all_names(self):
font = TTCFont(get_file_in_test_directory('jorvik_and_seriously.ttc'))
self.assertIn('Seriously', reduce(lambda names, info: names + info.names, font.get_infos(), []))
self.assertIn('Jorvik Informal V2', reduce(lambda names, info: names + info.names, font.get_infos(), []))
class FontInfoTests(unittest.TestCase):
def test_calculates_md5_on_access(self):
info = FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)
self.assertIsNotNone(info.md5)
def test_calculates_correct_md5(self):
info = FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)
self.assertEqual(info.md5, '0dae05c47e919281d7ac1e0170e4d3a8')
def test_caches_md5_in_private_field(self):
info = FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)
self.assertIsNone(info._FontInfo__md5)
md5 = info.md5
self.assertIsNotNone(info._FontInfo__md5)
|
jhutar/spacewalk
|
backend/cdn_tools/manifest.py
|
Python
|
gpl-2.0
| 8,783
| 0.001139
|
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import cStringIO
import json
import zipfile
import os
from M2Crypto import X509
import constants
class Manifest(object):
"""Class containing relevant data from RHSM manifest."""
SIGNATURE_NAME = "signature"
INNER_ZIP_NAME = "consumer_export.zip"
ENTITLEMENTS_PATH = "export/entitlements"
CERTIFICATE_PATH = "export/extensions"
PRODUCTS_PATH = "export/products"
def __init__(self, zip_path):
self.all_entitlements = []
self.manifest_repos = {}
self.sat5_certificate = None
# Signature and signed data
self.signature = None
self.data = None
# Open manifest from path
top_zip = None
inner_zip = None
inner_file = None
try:
top_zip = zipfile.ZipFile(zip_path, 'r')
# Fetch inner zip file into memory
try:
# inner_file = top_zip.open(zip_path.split('.zip')[0] + '/' + self.INNER_ZIP_NAME)
inner_file = top_zip.open(self.INNER_ZIP_NAME)
self.data = inner_file.read()
inner_file_data = cStringIO.StringIO(self.data)
signature_file = top_zip.open(self.SIGNATURE_NAME)
self.signature = signature_file.read()
# Open the inner zip file
try:
inner_zip = zipfile.ZipFile(inner_file_data)
self._load_entitlements(inner_zip)
self._extract_certificate(inner_zip)
finally:
if inner_zip is not None:
inner_zip.close()
finally:
if inner_file is not None:
inner_file.close()
finally:
if top_zip is not None:
top_zip.close()
def _extract_certificate(self, zip_file):
files = zip_file.namelist()
certificates_names = []
for f in files:
if f.startswith(self.CERTIFICATE_PATH) and f.endswith(".xml"):
certificates_names.append(f)
if len(certificates_names) >= 1:
            # take only first file
            cert_file = zip_file.open(certificates_names[0])
self.sat5_certificate = cert_file.read().strip()
cert_file.close()
else:
raise MissingSatelliteCertificateError("Satellite Certificate was not found in manifest.")
def _fill_product_repositories(self, zip_file, product):
product_file = zip_file.open(self.PRODUCTS_PATH + '/' + str(product.get_id()) + '.json')
product_data = json.load(product_file)
product_file.close()
        try:
for content in product_data['productContent']:
                content = content['content']
product.add_repository(content['label'], content['contentUrl'])
except KeyError:
print("ERROR: Cannot access required field in product '%s'" % product.get_id())
raise
def _load_entitlements(self, zip_file):
files = zip_file.namelist()
entitlements_files = []
for f in files:
if f.startswith(self.ENTITLEMENTS_PATH) and f.endswith(".json"):
entitlements_files.append(f)
if len(entitlements_files) >= 1:
self.all_entitlements = []
for entitlement_file in entitlements_files:
entitlements = zip_file.open(entitlement_file)
                # Nested try blocks: a hack for Python 2.4 compatibility,
                # which lacks combined try/except/finally support.
try:
try:
data = json.load(entitlements)
# Extract credentials
certs = data['certificates']
if len(certs) != 1:
raise IncorrectEntitlementsFileFormatError(
"ERROR: Single certificate in entitlements file is expected, found: %d"
% len(certs))
cert = certs[0]
credentials = Credentials(data['id'], cert['cert'], cert['key'])
# Extract product IDs
products = []
provided_products = data['pool']['providedProducts']
for provided_product in provided_products:
product = Product(provided_product['productId'])
self._fill_product_repositories(zip_file, product)
products.append(product)
# Skip entitlements not providing any products
if products:
entitlement = Entitlement(products, credentials)
self.all_entitlements.append(entitlement)
except KeyError:
print("ERROR: Cannot access required field in file '%s'" % entitlement_file)
raise
finally:
entitlements.close()
else:
raise IncorrectEntitlementsFileFormatError(
"ERROR: There has to be at least one entitlements file")
def get_all_entitlements(self):
return self.all_entitlements
def get_satellite_certificate(self):
return self.sat5_certificate
def check_signature(self):
if self.signature and self.data:
certs = os.listdir(constants.CANDLEPIN_CA_CERT_DIR)
# At least one certificate has to match
for cert_name in certs:
cert_file = None
try:
cert_file = open(constants.CANDLEPIN_CA_CERT_DIR + '/' + cert_name, 'r')
cert = X509.load_cert_string(cert_file.read())
except (IOError, X509.X509Error):
continue
finally:
if cert_file is not None:
cert_file.close()
pubkey = cert.get_pubkey()
pubkey.reset_context(md='sha256')
pubkey.verify_init()
pubkey.verify_update(self.data)
if pubkey.verify_final(self.signature):
return True
return False
class Entitlement(object):
def __init__(self, products, credentials):
if products and credentials:
self.products = products
self.credentials = credentials
else:
raise IncorrectEntitlementError()
def get_products(self):
return self.products
def get_credentials(self):
return self.credentials
class Credentials(object):
def __init__(self, identifier, cert, key):
if identifier:
self.id = identifier
else:
raise IncorrectCredentialsError(
"ERROR: ID of credentials has to be defined"
)
if cert and key:
self.cert = cert
self.key = key
else:
raise IncorrectCredentialsError(
"ERROR: Trying to create object with cert = %s and key = %s"
% (cert, key)
)
def get_id(self):
return self.id
def get_cert(self):
return self.cert
def get_key(self):
return self.key
class Product(object):
def __init__(self, identifier):
try:
self.id = int(identifier)
except ValueError:
raise IncorrectProductError(
"ERROR: Invalid product
|
vladimir-v-diaz/securesystemslib
|
securesystemslib/_vendor/ed25519/ed25519.py
|
Python
|
mit
| 7,618
| 0.000656
|
# ed25519.py - Optimized version of the reference implementation of Ed25519
#
# Written in 2011? by Daniel J. Bernstein <djb@cr.yp.to>
# 2013 by Donald Stufft <donald@stufft.io>
# 2013 by Alex Gaynor <alex.gaynor@gmail.com>
# 2013 by Greg Price <price@mit.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
NB: This code is not safe for use with secret keys or secret data.
The only safe use of this code is for verifying signatures on public messages.
Functions for computing the public key of a secret key and for signing
a message are included, namely publickey_unsafe and signature_unsafe,
for testing purposes only.
The root of the problem is that Python's long-integer arithmetic is
not designed for use in cryptography. Specifically, it may take more
or less time to execute an operation depending on the values of the
inputs, and its memory access patterns may also depend on the inputs.
This opens it to timing and cache side-channel attacks which can
disclose data to an attacker. We rely on Python's long-integer
arithmetic, so we cannot handle secrets without risking their disclosure.
"""
import hashlib
import operator
import sys
__version__ = "1.0.dev0"
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
indexbytes = operator.getitem
intlist2bytes = bytes
int2byte = operator.methodcaller("to_bytes", 1, "big")
else:
int2byte = chr
range = xrange
def indexbytes(buf, i):
return ord(buf[i])
def intlist2bytes(l):
return b"".join(chr(c) for c in l)
b = 256
q = 2 ** 255 - 19
l = 2 ** 252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def pow2(x, p):
"""== pow(x, 2**p, q)"""
while p > 0:
x = x * x % q
p -= 1
return x
def inv(z):
"""$= z^{-1} \mod q$, for z != 0"""
# Adapted from curve25519_athlon.c in djb's Curve25519.
z2 = z * z % q # 2
z9 = pow2(z2, 2) * z % q # 9
z11 = z9 * z2 % q # 11
z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0
z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0
z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...
z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0
return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2
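# The addition chain above is a fixed-exponent form of Fermat inversion,
# z^(q-2) mod q; a quick sanity check on public values (an added example):
assert inv(12345) == pow(12345, q - 2, q)
assert (12345 * inv(12345)) % q == 1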
d = -121665 * inv(121666) % q
I = pow(2, (q - 1) // 4, q)
def xrecover(y):
xx = (y * y - 1) * inv(d * y * y + 1)
x = pow(xx, (q + 3) // 8, q)
if (x * x - xx) % q != 0:
x = (x * I) % q
if x % 2 != 0:
x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = (Bx % q, By % q, 1, (Bx * By) % q)
ident = (0, 1, 1, 0)
def edwards_add(P, Q):
# This is formula sequence 'addition-add-2008-hwcd-3' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
(x2, y2, z2, t2) = Q
a = (y1-x1)*(y2-x2) % q
b = (y1+x1)*(y2+x2) % q
c = t1*2*d*t2 % q
dd = z1*2*z2 % q
e = b - a
f = dd - c
g = dd + c
h = b + a
x3 = e*f
y3 = g*h
t3 = e*h
z3 = f*g
return (x3 % q, y3 % q, z3 % q, t3 % q)
def edwards_double(P):
# This is formula sequence 'dbl-2008-hwcd' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
a = x1*x1 % q
b = y1*y1 % q
c = 2*z1*z1 % q
# dd = -a
e = ((x1+y1)*(x1+y1) - a - b) % q
g = -a + b # dd + b
f = g - c
h = -a - b # dd - b
x3 = e*f
y3 = g*h
t3 = e*h
z3 = f*g
return (x3 % q, y3 % q, z3 % q, t3 % q)
def scalarmult(P, e):
if e == 0:
return ident
Q = scalarmult(P, e // 2)
Q = edwards_double(Q)
if e & 1:
Q = edwards_add(Q, P)
return Q
# Bpow[i] == scalarmult(B, 2**i)
Bpow = []
def make_Bpow():
P = B
for i in range(253):
Bpow.append(P)
P = edwards_double(P)
make_Bpow()
def scalarmult_B(e):
"""
Implements scalarmult(B, e) more efficiently.
"""
# scalarmult(B, l) is the identity
e = e % l
P = ident
for i in range(253):
if e & 1:
P = edwards_add(P, Bpow[i])
e = e // 2
assert e == 0, e
return P
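# scalarmult_B must agree with the generic double-and-add ladder; extended
# coordinates are only unique up to a projective factor, so compare x/z and
# y/z by cross-multiplication (a consistency check added for illustration):
_P = scalarmult(B, 7)
_Q = scalarmult_B(7)
assert (_P[0] * _Q[2] - _Q[0] * _P[2]) % q == 0
assert (_P[1] * _Q[2] - _Q[1] * _P[2]) % q == 0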
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
return b''.join([
int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b//8)
])
def encodepoint(P):
(x, y, z, t) = P
zi = inv(z)
x = (x * zi) % q
y = (y * zi) % q
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
return b''.join([
int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b // 8)
])
def bit(h, i):
return (indexbytes(h, i // 8) >> (i % 8)) & 1
def publickey_unsafe(sk):
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
A = scalarmult_B(a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2 ** i * bit(h, i) for i in range(2 * b))
def signature_unsafe(m, sk, pk):
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
r = Hint(
intlist2bytes([indexbytes(h, j) for j in range(b // 8, b // 4)]) + m
)
R = scalarmult_B(r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
(x, y, z, t) = P
return (z % q != 0 and
x*y % q == z*t % q and
(y*y - x*x - z*z - d*t*t) % q == 0)
def decodeint(s):
return sum(2 ** i * bit(s, i) for i in range(0, b))
def decodepoint(s):
y = sum(2 ** i * bit(s, i) for i in range(0, b - 1))
x = xrecover(y)
if x & 1 != bit(s, b-1):
x = q - x
P = (x, y, 1, (x*y) % q)
if not isoncurve(P):
raise ValueError("decoding point that is not on curve")
return P
class SignatureMismatch(Exception):
pass
def checkvalid(s, m, pk):
"""
Not safe to use when any argument is secret.
See module docstring. This function should be used only for
verifying public signatures of public messages.
"""
if len(s) != b // 4:
raise ValueError("signature length is wrong")
if len(pk) != b // 8:
raise ValueError("public-key length is wrong")
R = decodepoint(s[:b // 8])
A = decodepoint(pk)
S = decodeint(s[b // 8:b // 4])
h = Hint(encodepoint(R) + pk + m)
(x1, y1, z1, t1) = P = scalarmult_B(S)
(x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))
if (not isoncurve(P) or not isoncurve(Q) or
(x1*z2 - x2*z1) % q != 0 or (y1*z2 - y2*z1) % q != 0):
raise SignatureMismatch("signature does not pass verification")
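# A round-trip smoke test using the unsafe helpers, which the module
# docstring permits for testing only (the key bytes are arbitrary test
# values, not a real secret):
if __name__ == "__main__":
    sk = b"\x42" * 32
    pk = publickey_unsafe(sk)
    msg = b"attack at dawn"
    sig = signature_unsafe(msg, sk, pk)
    checkvalid(sig, msg, pk)  # raises SignatureMismatch if anything is wrong
    print("ed25519 round trip OK")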
|
ConservationInternational/ldmp-qgis-plugin
|
LDMP/calculate_numba.py
|
Python
|
gpl-2.0
| 4,469
| 0.000671
|
import os
import json
import numpy as np
try:
from numba.pycc import CC
cc = CC('calculate_numba')
except ImportError:
    # Will use these as regular Python functions if numba is not present.
    class CCSubstitute(object):
# Make a cc.export that doesn't do anything
def export(*args, **kwargs):
def wrapper(func):
return func
return wrapper
cc = CCSubstitute()
@cc.export('ldn_recode_traj', 'i2[:,:](i2[:,:])')
def ldn_recode_traj(x):
# Recode trajectory into deg, stable, imp. Capture trends that are at least
# 95% significant.
#
# Remember that traj is coded as:
# -3: 99% signif decline
# -2: 95% signif decline
# -1: 90% signif decline
# 0: stable
# 1: 90% signif increase
# 2: 95% signif increase
# 3: 99% signif increase
shp = x.shape
x = x.ravel()
x[(x >= -1) & (x <= 1)] = 0
x[(x >= -3) & (x < -1)] = -1
# -1 and 1 are not signif at 95%, so stable
x[(x > 1) & (x <= 3)] = 1
return(np.reshape(x, shp))
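# A toy recode (an illustrative addition, not plugin code): only trends
# significant at 95%+ survive; note ldn_recode_traj modifies its input in
# place through the ravel view.
_traj = np.array([[-3, -1, 0], [1, 2, 3]], dtype=np.int16)
assert ldn_recode_traj(_traj).tolist() == [[-1, 0, 0], [0, 1, 1]]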
@cc.export('ldn_recode_state', 'i2[:,:](i2[:,:])')
def ldn_recode_state(x):
    # Recode state into deg, stable, imp. Note the >= -10 is so that the
    # no-data value isn't coded as degradation. More than two changes in
    # class is defined as degradation in state.
shp = x.shape
x = x.ravel()
x[(x > -2) & (x < 2)] = 0
x[(x >= -10) & (x <= -2)] = -1
x[x >= 2] = 1
return(np.reshape(x, shp))
@cc.export('ldn_make_prod5', 'i2[:,:](i2[:,:], i2[:,:], i2[:,:] ,i2[:,:])')
def ldn_make_prod5(traj, state, perf, mask):
# Coding of LPD (prod5)
# 1: declining
# 2: early signs of decline
# 3: stable but stressed
# 4: stable
# 5: improving
# -32768: no data
# Declining = 1
shp = traj.shape
traj = traj.ravel()
state = state.ravel()
perf = perf.ravel()
mask = mask.ravel()
x = traj.copy()
x[traj == -1] = 1
# Stable = 4
x[traj == 0] = 4
# Improving = 5
x[traj == 1] = 5
# Stable due to agreement in perf and state but positive trajectory
x[(traj == 1) & (state == -1) & (perf == -1)] = 4
# Stable but stressed
x[(traj == 0) & (state == 0) & (perf == -1)] = 3
# Early signs of decline
x[(traj == 0) & (state == -1) & (perf == 0)] = 2
# Ensure NAs carry over to productivity indicator layer
x[(traj == -32768) | (perf == -32768) | (state == -32768)] = -32768
# Ensure masked areas carry over to productivity indicator
x[mask == -32767] = -32767
return(np.reshape(x, shp))
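# For example (an illustrative call, not plugin code): a stable trajectory
# with declining state but stable performance is coded 2, "early signs of
# decline".
_t = np.array([[0]], dtype=np.int16)
_s = np.array([[-1]], dtype=np.int16)
_p = np.array([[0]], dtype=np.int16)
_m = np.array([[0]], dtype=np.int16)
assert ldn_make_prod5(_t, _s, _p, _m)[0, 0] == 2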
@cc.export('ldn_total_by_trans', '(f4[:,:], i2[:,:], f4[:,:])')
def ldn_total_by_trans(d, trans_a, cell_areas):
"""Calculates a total table for an array"""
d = d.ravel()
trans_a = trans_a.ravel()
trans = np.unique(trans_a)
cell_areas = cell_areas.ravel()
# Values less than zero are missing data flags
d[d < 0] = 0
totals = np.zeros(trans.size, dtype=np.float32)
for i in range(trans.size):
# Only sum values for this_trans, and where soc has a valid value
# (negative values are missing data flags)
vals = d[trans_a == trans[i]] * cell_areas[trans_a == trans[i]]
totals[i] += np.sum(vals)
return trans, totals
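# A tiny worked example (illustrative, not plugin code): two transition
# classes over four unit-area cells; the -1 value is a missing-data flag
# and is zeroed before summing.
_d = np.array([[1.0, 2.0], [3.0, -1.0]], dtype=np.float32)
_trans = np.array([[10, 10], [20, 20]], dtype=np.int16)
_areas = np.ones((2, 2), dtype=np.float32)
_tr, _totals = ldn_total_by_trans(_d, _trans, _areas)
assert _tr.tolist() == [10, 20] and _totals.tolist() == [3.0, 3.0]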
# @cc.export('ldn_total_by_trans_merge', '(f4[:], i2[:], f4[:], i2[:])')
# def ldn_total_by_trans_merge(total1, trans1, total2, trans2):
# """Calculates a total table for an array"""
# # Combine past totals with these totals
# trans = np.unique(np.concatenate((trans1, trans2)))
# totals = np.zeros(trans.size, dtype=np.float32)
# for i in range(trans.size):
# trans1_loc = np.where(trans1 == trans[i])[0]
# trans2_loc = np.where(trans2 == trans[i])[0]
# if trans1_loc.size > 0:
# totals[i] = totals[i] + total1[trans1_loc[0]]
# if trans2_loc.size > 0:
# totals[i] = totals[i] + total2[trans2_loc[0]]
# return trans, totals
@cc.export('ldn_total_deg', 'f4[4](i2[:,:], b1[:,:], f4[:,:])')
def ldn_total_deg(x, water, cell_areas):
"""Calculates a total table for an array"""
x = x.ravel()
cell_areas = cell_areas.ravel()
x[water.ravel()] = -32767
out = np.zeros((4), dtype=np.float32)
out[0] = np.sum(cell_areas[x == 1])
out[1] = np.sum(cell_areas[x == 0])
out[2] = np.sum(cell_areas[x == -1])
out[3] = np.sum(cell_areas[x == -32768])
return out
if __name__ == "__main__":
cc.compile()
|
isrealconsulting/codepy27
|
main.py
|
Python
|
apache-2.0
| 3,227
| 0.00093
|
#!/usr/bin/env python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample Google App Engine application that demonstrates how to send mail using
Mailgun.
For more information, see README.md.
"""
from urllib import urlencode
import httplib2
import webapp2
# Your Mailgun Domain Name
MAILGUN_DOMAIN_NAME = 'isrealconsulting.com'
# Your Mailgun API key
MAILGUN_API_KEY = 'key-1ffd59a9c3afdcf762f22b21129c13f6'
# [START simple_message]
def send_simple_message(recipient):
http = httplib2.Http()
http.add_credentials('api', MAILGUN_API_KEY)
    url = 'https://api.mailgun.net/v3/{}/messages'.format(MAILGUN_DOMAIN_NAME)
data = {
'from': 'Isreal Consulting Webmaster <webmaster@{}>'.format(MAILGUN_DOMAIN_NAME),
'to': recipient,
'subject': 'This is an example email from ICLLC code site codepy',
        'text': 'Test message from codepy-1'
}
resp, content = http.request(url, 'POST', urlencode(data))
if resp.status != 200:
raise RuntimeError(
'Mailgun API error: {} {}'.format(resp.status, content))
# [END simple_message]
# [START complex_message]
def send_complex_message(recipient):
http = httplib2.Http()
http.add_credentials('api', MAILGUN_API_KEY)
url = 'https://api.mailgun.net/v3/{}/messages'.format(MAILGUN_DOMAIN_NAME)
data = {
'from': 'Isreal Consulting Webmaster <webmaster@{}>'.format(MAILGUN_DOMAIN_NAME),
'to': recipient,
'subject': 'This is an example email from ICLLC code site codepy',
'text': 'Test message from codepy-1',
'html': '<html>HTML <strong>version</strong> of the body</html>'
}
resp, content = http.request(url, 'POST', urlencode(data))
if resp.status != 200:
raise RuntimeError(
'Mailgun API error: {} {}'.format(resp.status, content))
# [END complex_message]
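# The same request expressed with the `requests` library, for local testing
# outside App Engine (an equivalent sketch, not part of the original sample):
def send_simple_message_requests(recipient):
    import requests
    resp = requests.post(
        'https://api.mailgun.net/v3/{}/messages'.format(MAILGUN_DOMAIN_NAME),
        auth=('api', MAILGUN_API_KEY),
        data={
            'from': 'Isreal Consulting Webmaster <webmaster@{}>'.format(
                MAILGUN_DOMAIN_NAME),
            'to': recipient,
            'subject': 'This is an example email from ICLLC code site codepy',
            'text': 'Test message from codepy-1',
        })
    if resp.status_code != 200:
        raise RuntimeError(
            'Mailgun API error: {} {}'.format(resp.status_code, resp.text))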
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.content_type = 'text/html'
self.response.write("""
<!doctype html>
<html><head><title>Isreal Consulting</title></head>
<body>
<form method="POST">
<input type="text" name="recipient" placeholder="Enter recipient email">
<input type="submit" name="submit" value="Send simple email">
<input type="submit" name="submit" value="Send complex email">
</form>
</body></html>
""")
def post(self):
recipient = self.request.get('recipient')
action = self.request.get('submit')
if action == 'Send simple email':
send_simple_message(recipient)
else:
send_complex_message(recipient)
self.response.write('Mail sent')
app = webapp2.WSGIApplication([
('/', MainPage)
], debug=True)
|
wtsi-hgi/cookie-monster
|
cookiemonster/tests/processor/_mocks.py
|
Python
|
gpl-3.0
| 2,448
| 0.002451
|
"""
Legalese
--------
Copyright (c) 2015, 2016 Genome Research Ltd.
Author: Colin Nolan <cn13@sanger.ac.uk>
This file is part of Cookie Monster.
Cookie Monster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from unittest.mock import MagicMock
from hgicommon.mixable import Priority
from cookiemonster.cookiejar import CookieJar
from cookiemonster.cookiejar.in_memory_cookiejar import InMemoryCookieJar
from cookiemonster.processor.models import Rule
def create_mock_rule(priority: int=Priority.MIN_PRIORITY) -> Rule:
"""
    Creates a mock `Rule` object.
:param priority: (optional) the priority of the rule
:return: the created rule
"""
return Rule(
lambda file_update, data_environment: True,
lambda file_update, data_environment: True,
"my_rule",
priority=priority
)
def create_magic_mock_cookie_jar() -> CookieJar:
"""
    Creates a magic mock CookieJar - it has the implementation of a real
    CookieJar, but all methods are wrapped in magic mocks and therefore
    their usage is recorded.
:return: the created magic mock
"""
cookie_jar = InMemoryCookieJar()
original_get_next_for_processing = cookie_jar.get_next_for_processing
original_enrich_cookie = cookie_jar.enrich_cookie
    original_mark_as_failed = cookie_jar.mark_as_failed
original_mark_as_completed = cookie_jar.mark_as_complete
original_mark_as_reprocess = cookie_jar.mark_for_processing
cookie_jar.get_next_for_processing = MagicMock(side_effect=original_get_next_for_processing)
cookie_jar.enrich_cookie = MagicMock(side_effect=original_enrich_cookie)
cookie_jar.mark_as_failed = MagicMock(side_effect=original_mark_as_failed)
cookie_jar.mark_as_complete = MagicMock(side_effect=original_mark_as_completed)
cookie_jar.mark_for_processing = MagicMock(side_effect=original_mark_as_reprocess)
return cookie_jar
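# A hedged usage sketch (the cookie identifier is arbitrary): because every
# method is wrapped in a MagicMock, tests exercise the real in-memory
# behaviour while still being able to assert on call history.
jar = create_magic_mock_cookie_jar()
jar.mark_for_processing('/my/cookie')
jar.mark_for_processing.assert_called_once_with('/my/cookie')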
|