| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
import json
def check_metric(metric, *args, **kwargs):
metric = mx.metric.create(metric, *args, **kwargs)
str_metric = json.dumps(metric.get_config())
metric2 = mx.metric.create(str_metric)
assert metric.get_config() == metric2.get_config()
def test_metrics():
check_metric('acc', axis=0)
check_metric('f1')
check_metric('perplexity', -1)
check_metric('pearsonr')
check_metric('nll_loss')
composite = mx.metric.create(['acc', 'f1'])
check_metric(composite)
def test_nll_loss():
metric = mx.metric.create('nll_loss')
pred = mx.nd.array([[0.2, 0.3, 0.5], [0.6, 0.1, 0.3]])
label = mx.nd.array([2, 1])
metric.update([label], [pred])
_, loss = metric.get()
expected_loss = 0.0
expected_loss = -(np.log(pred[0][2].asscalar()) + np.log(pred[1][1].asscalar())) / 2
assert loss == expected_loss
if __name__ == '__main__':
import nose
nose.runmodule()
| Mega-DatA-Lab/mxnet | tests/python/unittest/test_metric.py | Python | apache-2.0 | 1,757 | 0.002277 |
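The check_metric helper above leans on mx.metric.create() accepting either a registered metric name or a serialised JSON config. A minimal sketch of that round trip under the MXNet 1.x mx.metric API (the batch values below are made up for illustration):

```python
import json
import mxnet as mx

# Build an accuracy metric and feed it one illustrative mini-batch.
acc = mx.metric.create('acc')
preds = mx.nd.array([[0.2, 0.8], [0.9, 0.1]])   # argmax -> [1, 0]
labels = mx.nd.array([1, 0])
acc.update([labels], [preds])
print(acc.get())                                # ('accuracy', 1.0)

# Serialise the config and rebuild an equivalent metric from it,
# mirroring the assertion in check_metric() above.
config_str = json.dumps(acc.get_config())
acc2 = mx.metric.create(config_str)
assert acc.get_config() == acc2.get_config()
```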
import asyncio
import inspect
import logging
from typing import List, Tuple, Callable, NamedTuple
from lightbus.schema.schema import Parameter
from lightbus.message import EventMessage
from lightbus.client.subclients.base import BaseSubClient
from lightbus.client.utilities import validate_event_or_rpc_name, queue_exception_checker, OnError
from lightbus.client.validator import validate_outgoing, validate_incoming
from lightbus.exceptions import (
UnknownApi,
EventNotFound,
InvalidEventArguments,
InvalidEventListener,
ListenersAlreadyStarted,
DuplicateListenerName,
)
from lightbus.log import L, Bold
from lightbus.client.commands import (
SendEventCommand,
AcknowledgeEventCommand,
ConsumeEventsCommand,
CloseCommand,
)
from lightbus.utilities.async_tools import run_user_provided_callable, cancel_and_log_exceptions
from lightbus.utilities.internal_queue import InternalQueue
from lightbus.utilities.casting import cast_to_signature
from lightbus.utilities.deforming import deform_to_bus
from lightbus.utilities.singledispatch import singledispatchmethod
logger = logging.getLogger(__name__)
class EventClient(BaseSubClient):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._event_listeners: List[Listener] = []
self._event_listener_tasks = set()
self._listeners_started = False
async def fire_event(
self, api_name, name, kwargs: dict = None, options: dict = None
) -> EventMessage:
kwargs = kwargs or {}
try:
api = self.api_registry.get(api_name)
except UnknownApi:
raise UnknownApi(
"Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name}"
" was found in the registry. An API being in the registry implies you are an"
" authority on that API. Therefore, Lightbus requires the API to be in the registry"
" as it is a bad idea to fire events on behalf of remote APIs. However, this could"
" also be caused by a typo in the API name or event name, or be because the API"
" class has not been registered using bus.client.register_api(). ".format(
**locals()
)
)
validate_event_or_rpc_name(api_name, "event", name)
try:
event = api.get_event(name)
except EventNotFound:
raise EventNotFound(
"Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does"
" not seem to contain an event named {name}. You may need to define the event, you"
" may also be using the incorrect API. Also check for typos.".format(**locals())
)
p: Parameter
parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}
required_parameter_names = {
p.name if isinstance(p, Parameter) else p
for p in event.parameters
if getattr(p, "is_required", True)
}
if required_parameter_names and not required_parameter_names.issubset(set(kwargs.keys())):
raise InvalidEventArguments(
"Missing required arguments when firing event {}.{}. Attempted to fire event with "
"{} arguments: {}. Event requires {}: {}".format(
api_name,
name,
len(kwargs),
sorted(kwargs.keys()),
len(parameter_names),
sorted(parameter_names),
)
)
extra_arguments = set(kwargs.keys()) - parameter_names
if extra_arguments:
raise InvalidEventArguments(
"Unexpected argument supplied when firing event {}.{}. Attempted to fire event with"
" {} arguments: {}. Unexpected argument(s): {}".format(
api_name, name, len(kwargs), sorted(kwargs.keys()), sorted(extra_arguments),
)
)
kwargs = deform_to_bus(kwargs)
event_message = EventMessage(
api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
)
validate_outgoing(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_sent", event_message=event_message)
logger.info(L("📤 Sending event {}.{}".format(Bold(api_name), Bold(name))))
await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()
await self.hook_registry.execute("after_event_sent", event_message=event_message)
return event_message
def listen(
self,
events: List[Tuple[str, str]],
listener: Callable,
listener_name: str,
options: dict = None,
on_error: OnError = OnError.SHUTDOWN,
):
if self._listeners_started:
# We are actually technically able to support starting listeners after worker
# startup, but it seems like it is a bad idea and a bit of an edge case.
# We may revisit this if sufficient demand arises.
raise ListenersAlreadyStarted(
"You are trying to register a new listener after the worker has started running."
" Listeners should be setup in your @bus.client.on_start() hook, in your bus.py"
" file."
)
sanity_check_listener(listener)
for listener_api_name, _ in events:
duplicate_listener = self.get_event_listener(listener_api_name, listener_name)
if duplicate_listener:
raise DuplicateListenerName(
f"A listener with name '{listener_name}' is already registered for API"
f" '{listener_api_name}'. You cannot have multiple listeners with the same name"
" for a given API. Rename one of your listeners to resolve this problem."
)
for api_name, name in events:
validate_event_or_rpc_name(api_name, "event", name)
self._event_listeners.append(
Listener(
callable=listener,
options=options or {},
events=events,
name=listener_name,
on_error=on_error,
)
)
def get_event_listener(self, api_name: str, listener_name: str):
for listener in self._event_listeners:
if listener.name == listener_name:
for listener_api_name, _ in listener.events:
if listener_api_name == api_name:
return listener
return None
async def _on_message(
self, event_message: EventMessage, listener: Callable, options: dict, on_error: OnError
):
# TODO: Check events match those requested
logger.info(
L(
"📩 Received event {}.{} with ID {}".format(
Bold(event_message.api_name), Bold(event_message.event_name), event_message.id
)
)
)
validate_incoming(self.config, self.schema, event_message)
await self.hook_registry.execute("before_event_execution", event_message=event_message)
if self.config.api(event_message.api_name).cast_values:
parameters = cast_to_signature(parameters=event_message.kwargs, callable=listener)
else:
parameters = event_message.kwargs
# Call the listener.
# Pass the event message as a positional argument,
# thereby allowing listeners to have flexibility in the argument names.
# (And therefore allowing listeners to use the `event` parameter themselves)
if on_error == OnError.SHUTDOWN:
# Run the callback in the queue_exception_checker(). This will
# put any errors into Lightbus' error queue, and therefore
# cause a shutdown
await queue_exception_checker(
run_user_provided_callable(listener, args=[event_message], kwargs=parameters),
self.error_queue,
help=(
f"An error occurred while {listener} was handling an event. Lightbus will now"
" shutdown. If you wish to continue you can use the on_error parameter when"
" setting up your event. For example:\n\n bus.my_api.my_event.listen(fn,"
" listener_name='example', on_error=lightbus.OnError.ACKNOWLEDGE_AND_LOG)"
),
)
elif on_error == OnError.ACKNOWLEDGE_AND_LOG:
try:
await listener(event_message, **parameters)
except asyncio.CancelledError:
raise
except Exception as e:
# Log here. Acknowledgement follows below.
logger.exception(e)
# Acknowledge the successfully processed message
await self.producer.send(
AcknowledgeEventCommand(message=event_message, options=options)
).wait()
await self.hook_registry.execute("after_event_execution", event_message=event_message)
async def close(self):
await super().close()
await cancel_and_log_exceptions(*self._event_listener_tasks)
await self.producer.send(CloseCommand()).wait()
await self.consumer.close()
await self.producer.close()
@singledispatchmethod
async def handle(self, command):
raise NotImplementedError(f"Did not recognise command {command.__class__.__name__}")
async def start_registered_listeners(self):
"""Start all listeners which have been previously registered via listen()"""
self._listeners_started = True
for listener in self._event_listeners:
await self._start_listener(listener)
async def _start_listener(self, listener: "Listener"):
# Setting the maxsize to 1 ensures the transport cannot load
# messages faster than we can consume them
queue: InternalQueue[EventMessage] = InternalQueue(maxsize=1)
async def consume_events():
while True:
logger.debug("Event listener now waiting for event on the internal queue")
event_message = await queue.get()
logger.debug(
"Event listener has now received an event on the internal queue, processing now"
)
await self._on_message(
event_message=event_message,
listener=listener.callable,
options=listener.options,
on_error=listener.on_error,
)
queue.task_done()
# Start the consume_events() consumer running
task = asyncio.ensure_future(queue_exception_checker(consume_events(), self.error_queue))
self._event_listener_tasks.add(task)
await self.producer.send(
ConsumeEventsCommand(
events=listener.events,
destination_queue=queue,
listener_name=listener.name,
options=listener.options,
)
).wait()
class Listener(NamedTuple):
callable: Callable
options: dict
events: List[Tuple[str, str]]
name: str
on_error: OnError
def sanity_check_listener(listener):
if not callable(listener):
raise InvalidEventListener(
f"The specified event listener {listener} is not callable. Perhaps you called the"
" function rather than passing the function itself?"
)
total_positional_args = 0
has_variable_positional_args = False # Eg: *args
for parameter in inspect.signature(listener).parameters.values():
if parameter.kind in (
inspect.Parameter.POSITIONAL_ONLY,
inspect.Parameter.POSITIONAL_OR_KEYWORD,
):
total_positional_args += 1
elif parameter.kind == inspect.Parameter.VAR_POSITIONAL:
has_variable_positional_args = True
if has_variable_positional_args:
return
if not total_positional_args:
raise InvalidEventListener(
f"The specified event listener {listener} must take at least one positional argument. "
"This will be the event message. For example: "
"my_listener(event, other, ...)"
)
| adamcharnock/lightbus | lightbus/client/subclients/event.py | Python | apache-2.0 | 12,494 | 0.003764 |
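EventClient.listen() above only records a Listener; consumption starts once start_registered_listeners() runs after worker startup. A hedged sketch of what registration might look like from user code, following the on_error example embedded in the shutdown help text above; the auth API, event name, and handler are illustrative, not part of the file:

```python
# bus.py (illustrative): listeners are registered before the worker starts,
# as enforced by the ListenersAlreadyStarted check above.
import lightbus

bus = lightbus.create()

def on_user_registered(event, username, email):
    # The first positional argument receives the EventMessage (see _on_message
    # above); the remaining keyword arguments are the event parameters.
    print(f"Welcome {username} <{email}>")

@bus.client.on_start()
def register_listeners(**kwargs):
    bus.auth.user_registered.listen(
        on_user_registered,
        listener_name="send_welcome_email",
        on_error=lightbus.OnError.ACKNOWLEDGE_AND_LOG,
    )
```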
def extractShibbsdenCom(item):
'''
Parser for 'shibbsden.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('GOOD CHILD', 'Reborn as a Good Child', 'translated'),
('LUCKY CAT', 'I am the Lucky Cat of an MMORPG', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractShibbsdenCom.py | Python | bsd-3-clause | 714 | 0.029412 |
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("projects", "doctype", "project")
frappe.db.sql("""
update `tabProject` p
set total_sales_amount = ifnull((select sum(base_grand_total)
from `tabSales Order` where project=p.name and docstatus=1), 0)
""")
| ovresko/erpnext | erpnext/patches/v8_0/update_sales_cost_in_project.py | Python | gpl-3.0 | 303 | 0.026403 |
import logging
from django.utils.html import format_html
import django_tables2 as tables
from django_tables2.rows import BoundPinnedRow, BoundRow
logger = logging.getLogger(__name__)
# A cheat to force BoundPinnedRows to use the same rendering as BoundRows
# otherwise links don't work
# BoundPinnedRow._get_and_render_with = BoundRow._get_and_render_with
class MultiLinkColumn(tables.RelatedLinkColumn):
"""
Like RelatedLinkColumn but allows multiple choices of accessor to be
rendered in a hierarchy, e.g.
accessors = ['foo.bar', 'baz.bof']
text = '{instance.number}: {instance}'
In this case if 'foo.bar' resolves, it will be rendered. Otherwise
'baz.bof' will be tested to resolve, and so on. If nothing renders,
the column will be blank. The text string will resolve using instance.
"""
def __init__(self, accessors, **kwargs):
"""Here we force order by the accessors. By default MultiLinkColumns
have empty_values: () to force calculation every time.
"""
defaults = {
'order_by': accessors,
'empty_values': (),
}
defaults.update(**kwargs)
super().__init__(**defaults)
self.accessors = [tables.A(a) for a in accessors]
def compose_url(self, record, bound_column):
"""Resolve the first accessor which resolves. """
for a in self.accessors:
try:
return a.resolve(record).get_absolute_url()
except (ValueError, AttributeError):
continue
return ""
def text_value(self, record, value):
"""If self.text is set, it will be used as a format string for the
instance returned by the accessor with the keyword `instance`.
"""
for a in self.accessors:
try:
instance = a.resolve(record)
if instance is None:
raise ValueError
except ValueError:
continue
# Use self.text as a format string
if self.text:
return self.text.format(instance=instance, record=record,
value=value)
else:
return str(instance)
# Finally if no accessors were resolved, return value or a blank string
# return super().text_value(record, value)
return value or ""
class XeroLinkColumn(tables.Column):
"""Renders a badge link to the object's record in Xero."""
def render(self, value, record=None):
if record.xero_id:
return format_html(
'<span class="badge progress-bar-info">'
'<a class="alert-link" role="button" target="_blank" '
'href="{href}">View in Xero</a></span>',
href=record.get_xero_url()
)
class BaseTable(tables.Table):
class Meta:
attrs = {"class": "table table-bordered table-striped table-hover "
"table-condensed"}
# @classmethod
# def set_header_color(cls, color):
# """
# Sets all column headers to have this background colour.
# """
# for column in cls.base_columns.values():
# try:
# column.attrs['th'].update(
# {'style': f'background-color:{color};'})
# except KeyError:
# column.attrs['th'] = {'style': f'background-color:{color};'}
def set_header_color(self, color):
"""
Sets all column headers to have this background colour.
"""
for column in self.columns.columns.values():
try:
column.column.attrs['th'].update(
{'style': f'background-color:{color};'})
except KeyError:
column.column.attrs['th'] = {
'style': f'background-color:{color};'}
class ModelTable(BaseTable):
class Meta(BaseTable.Meta):
exclude = ('id',)
class CurrencyColumn(tables.Column):
"""Render a table column as GBP."""
def render(self, value):
return f'£{value:,.2f}'
class NumberColumn(tables.Column):
"""Only render decimal places if necessary."""
def render(self, value):
if value is not None:
return f'{value:n}'
class ColorColumn(tables.Column):
"""Render the colour in a box."""
def __init__(self, *args, **kwargs):
"""This will ignore other attrs passed in."""
kwargs.setdefault('attrs', {'td': {'class': "small-width text-center"}})
super().__init__(*args, **kwargs)
def render(self, value):
if value:
return format_html(
'<div class="color-box" style="background:{};"></div>', value)
| sdolemelipone/django-crypsis | crypsis/tables.py | Python | gpl-3.0 | 4,778 | 0.000209 |
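MultiLinkColumn above walks its accessors in order and renders the first one that resolves, formatting self.text against that instance. A short sketch of how it might be declared, assuming the app is importable as crypsis (as the path suggests); the quote/order relations and the Invoice queryset are hypothetical:

```python
# Illustrative table definition built on the columns above.
from crypsis.tables import BaseTable, CurrencyColumn, MultiLinkColumn

class InvoiceTable(BaseTable):
    # Link to the related quote if it resolves, otherwise fall back to the order.
    source = MultiLinkColumn(
        accessors=['quote', 'order'],
        text='{instance}',
    )
    total = CurrencyColumn()

# table = InvoiceTable(Invoice.objects.all())  # hypothetical queryset
# table.set_header_color('#f5f5f5')            # per-instance header styling
```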
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QAD Quantum Aided Design plugin ok
class to manage the map tool for the array command
-------------------
begin : 2016-05-31
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from .. import qad_utils
from ..qad_variables import QadVariables
from ..qad_getpoint import QadGetPoint, QadGetPointSelectionModeEnum, QadGetPointDrawModeEnum
from ..qad_highlight import QadHighlight
from ..qad_dim import QadDimStyles, appendDimEntityIfNotExisting
from ..qad_entity import QadCacheEntitySetIterator, QadEntityTypeEnum
from .. import qad_array_fun
#===============================================================================
# Qad_array_maptool_ModeEnum class.
#===============================================================================
class Qad_array_maptool_ModeEnum():
# nothing is requested
NONE = 0
# ask for the base point
ASK_FOR_BASE_PT = 1
# ask for the first point of the distance between columns
ASK_FOR_COLUMN_SPACE_FIRST_PT = 2
# ask for the first point of the cell size
ASK_FOR_1PT_CELL = 3
# ask for the second point of the cell size
ASK_FOR_2PT_CELL = 4
# ask for the first point of the distance between rows
ASK_FOR_ROW_SPACE_FIRST_PT = 5
#===============================================================================
# Qad_array_maptool class
#===============================================================================
class Qad_array_maptool(QadGetPoint):
def __init__(self, plugIn):
QadGetPoint.__init__(self, plugIn)
self.cacheEntitySet = None
self.basePt = None
self.arrayType = None
self.distanceBetweenRows = None
self.distanceBetweenCols = None
self.itemsRotation = None
# rectangular array
self.rectangleAngle = None
self.rectangleCols = None
self.rectangleRows = None
self.firstPt = None
# path array
self.pathTangentDirection = None
self.pathRows = None
self.pathItemsNumber = None
self.pathPolyline = None
# polar array
self.centerPt = None
self.polarItemsNumber = None
self.polarAngleBetween = None
self.polarRows = None
self.__highlight = QadHighlight(self.canvas)
def hidePointMapToolMarkers(self):
QadGetPoint.hidePointMapToolMarkers(self)
self.__highlight.hide()
def showPointMapToolMarkers(self):
QadGetPoint.showPointMapToolMarkers(self)
self.__highlight.show()
def clear(self):
QadGetPoint.clear(self)
self.__highlight.reset()
self.mode = None
#============================================================================
# doRectangleArray
#============================================================================
def doRectangleArray(self):
self.__highlight.reset()
dimElaboratedList = [] # list of dimension entities already processed
entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
for entity in entityIterator:
qadGeom = entity.getQadGeom().copy() # this initializes the qad geometry info
# check whether the entity belongs to a dimension style
dimEntity = QadDimStyles.getDimEntity(entity)
if dimEntity is not None:
if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
continue
entity = dimEntity
if qad_array_fun.arrayRectangleEntity(self.plugIn, entity, self.basePt, self.rectangleRows, self.rectangleCols, \
self.distanceBetweenRows, self.distanceBetweenCols, self.rectangleAngle, self.itemsRotation,
False, self.__highlight) == False:
return
#============================================================================
# doPathArray
#============================================================================
def doPathArray(self):
self.__highlight.reset()
dimElaboratedList = [] # list of dimension entities already processed
entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
for entity in entityIterator:
qadGeom = entity.getQadGeom().copy() # this initializes the qad geometry info
# check whether the entity belongs to a dimension style
dimEntity = QadDimStyles.getDimEntity(entity)
if dimEntity is not None:
if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
continue
entity = dimEntity
if qad_array_fun.arrayPathEntity(self.plugIn, entity, self.basePt, self.pathRows, self.pathItemsNumber, \
self.distanceBetweenRows, self.distanceBetweenCols, self.pathTangentDirection, self.itemsRotation, \
self.pathPolyline, self.distanceFromStartPt, \
False, self.__highlight) == False:
return
#============================================================================
# doPolarArray
#============================================================================
def doPolarArray(self):
self.__highlight.reset()
dimElaboratedList = [] # list of dimension entities already processed
entityIterator = QadCacheEntitySetIterator(self.cacheEntitySet)
for entity in entityIterator:
qadGeom = entity.getQadGeom().copy() # this initializes the qad geometry info
# check whether the entity belongs to a dimension style
dimEntity = QadDimStyles.getDimEntity(entity)
if dimEntity is not None:
if appendDimEntityIfNotExisting(dimElaboratedList, dimEntity) == False: # dimension already processed
continue
entity = dimEntity
if qad_array_fun.arrayPolarEntity(self.plugIn, entity, self.basePt, self.centerPt, self.polarItemsNumber, \
self.polarAngleBetween, self.polarRows, self.distanceBetweenRows, self.itemsRotation, \
False, self.__highlight) == False:
return
def canvasMoveEvent(self, event):
QadGetPoint.canvasMoveEvent(self, event)
# # once the base point is known, ask for the second point
# if self.mode == Qad_array_maptool_ModeEnum.BASE_PT_KNOWN_ASK_FOR_COPY_PT:
# self.setCopiedGeometries(self.tmpPoint)
def activate(self):
QadGetPoint.activate(self)
self.__highlight.show()
def deactivate(self):
try: # needed because this event fires when QGIS is closed, even though the maptool object no longer exists!
QadGetPoint.deactivate(self)
self.__highlight.hide()
except:
pass
def setMode(self, mode):
self.mode = mode
# nothing is requested
if self.mode == Qad_array_maptool_ModeEnum.NONE:
self.setSelectionMode(QadGetPointSelectionModeEnum.NONE)
self.setDrawMode(QadGetPointDrawModeEnum.NONE)
# ask for the base point
elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_BASE_PT:
self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
self.setDrawMode(QadGetPointDrawModeEnum.NONE)
# ask for the first point of the distance between columns
elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_COLUMN_SPACE_FIRST_PT:
self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
self.setDrawMode(QadGetPointDrawModeEnum.NONE)
# ask for the first point of the cell size
elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_1PT_CELL:
self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
self.setDrawMode(QadGetPointDrawModeEnum.NONE)
# ask for the second point of the cell size
elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_2PT_CELL:
self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
self.setDrawMode(QadGetPointDrawModeEnum.ELASTIC_RECTANGLE)
self.setStartPoint(self.firstPt)
# ask for the first point of the distance between rows
elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_ROW_SPACE_FIRST_PT:
self.setSelectionMode(QadGetPointSelectionModeEnum.POINT_SELECTION)
self.setDrawMode(QadGetPointDrawModeEnum.NONE)
# ask for the second point of the distance between columns
# elif self.mode == Qad_array_maptool_ModeEnum.ASK_FOR_COLUMN_SPACE_SECOND_PT:
# self.setDrawMode(QadGetPointDrawModeEnum.ELASTIC_LINE)
# self.setStartPoint(self.firstPt)
| gam17/QAD | cmd/qad_array_maptool.py | Python | gpl-3.0 | 9,990 | 0.020048 |
from django.conf import settings
from django.utils import timezone
from rest_framework import authentication
from rest_framework import exceptions
import datetime
import jwt
from .models import User
def generate_jwt(user):
payload = {
'user': user.pk,
'exp': timezone.now() + datetime.timedelta(weeks=2),
'iat': timezone.now()
}
return jwt.encode(payload, settings.SECRET_KEY)
def decode_jwt(token):
return jwt.decode(token, settings.SECRET_KEY)
class JWTAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
token = self._get_jwt_from_header(request)
try:
payload = decode_jwt(token)
except jwt.ExpiredSignature:
detail = 'Signature has expired.'
raise exceptions.AuthenticationFailed(detail=detail)
except jwt.DecodeError:
detail = 'Error decoding token.'
raise exceptions.AuthenticationFailed(detail=detail)
except jwt.InvalidTokenError:
raise exceptions.AuthenticationFailed()
user = self._get_user_by_id(payload)
return (user, token)
def _get_jwt_from_header(self, request):
auth_header = authentication.get_authorization_header(request)
if not auth_header:
detail = 'No Authorization header present.'
raise exceptions.AuthenticationFailed(detail=detail)
try:
prefix, token = auth_header.split()
except ValueError:
detail = 'Invalid Authorization header.'
raise exceptions.AuthenticationFailed(detail=detail)
return token
def _get_user_by_id(self, payload):
user_pk = payload['user']
try:
return User.objects.get(pk=user_pk)
except User.DoesNotExist:
detail = 'Invalid payload.'
raise exceptions.AuthenticationFailed(detail=detail)
| Jwpe/alexandria-server | alexandria_server/permissions/authentication.py | Python | mit | 2,060 | 0.000485 |
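generate_jwt()/decode_jwt() above sign a two-week token with the project's SECRET_KEY, and JWTAuthentication.authenticate() expects an Authorization header that splits into exactly two parts. A hedged sketch of issuing and presenting such a token, assuming PyJWT 1.x (where jwt.encode() returns bytes), the app being importable as permissions, and an existing user plus a Django test client:

```python
# Illustrative usage of the helpers above; `user` and `client` are assumed to
# exist, e.g. inside a Django test case.
from permissions.authentication import decode_jwt, generate_jwt

token = generate_jwt(user)                   # bytes under PyJWT 1.x
assert decode_jwt(token)['user'] == user.pk

# authenticate() splits the header into (prefix, token); the prefix itself is
# not validated by the code above, so any single word works.
response = client.get(
    '/protected/endpoint/',
    HTTP_AUTHORIZATION='JWT ' + token.decode('utf-8'),
)
```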
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import httplib
import urllib
import socket
class ToolProtocolHTTP(object):
"""
HTTP/HTTPS client for the TEMA MBT protocol. Communicates with the TEMA test engine.
"""
# is client connected to the server
isConnected = False
def __init__(self):
self.host = "localhost"
self.port = 80
self.php_file = "temagui_http_proxy.php"
socket.setdefaulttimeout(1800)
def __del__(self):
if self.isConnected:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'CLOSE', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
def __requestreply(self,message ):
""" One http(s) request/reply.
Message: Message to send string.
Returns: Reply string.
"""
http_data = ''
try:
http_connection = None
if self.protocol == "HTTP":
http_connection = httplib.HTTPConnection(self.host, self.port)
elif self.protocol == "HTTPS":
http_connection = httplib.HTTPSConnection(self.host, self.port)
else:
return ''
http_connection.connect()
http_connection.request("POST", self.php_file, message , self.http_headers)
http_response = http_connection.getresponse()
http_data = http_response.read()
http_response.close()
http_connection.close()
except Exception, e:
http_data = ''
return http_data
def init(self, host, path, port, username, protocol):
""" Initialises connection. Sends HELO.
host: Server hostname.
path: path to http proxy in server.
port: port
username: wwwgui username
protocol: http/https
returns: Reply to ACK. On error returns ''
"""
self.http_headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
self.host = host
self.php_file = "/".join(["",path,"temagui_http_proxy.php"])
self.port = port
self.username = username
self.protocol = protocol.upper()
try:
# SEND HELO
http_params = urllib.urlencode({"User" : username, "Message" : 'HELO', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = True
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
return http_data
def getKeyword(self):
""" Gets keyword from testserver.
Sends GET to testserver and waits for reply.
Returns: Reply to GET. On error return ''
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'GET', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return 'ERROR'
if message == 'ERR':
# TODO: don't send ack.
http_data = self.__requestreply(http_params)
http_params = urllib.urlencode({"User" : self.username, "Message" : 'ACK', "Parameter" : 'Empty'})
http_data = self.__requestreply(http_params)
self.isConnected = False
return 'ERROR'
if not http_data.startswith("ACK"):
print http_data
return "ERROR"
else:
#http_data = http_data.partition("ACK")[2].strip()
http_data = http_data.split("ACK")[1].strip()
if http_data == '' or http_data == None:
http_data = ''
self.isConnected = False
except Exception, e:
self.isConnected = False
return http_data
def putResult(self, result):
""" Puts result to testserver.
result: True/False
returns: Reply message to PUT
"""
try:
if result:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'true'})
else:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'PUT', "Parameter" : 'false'})
except Exception, e:
self.isConnected = False
return ''
try:
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def log(self, msg):
""" Sends log message to testserver
returns: Reply to message.
"""
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'LOG', "Parameter" : msg })
http_data = self.__requestreply(http_params)
lines = http_data.splitlines()
if lines != []:
message = lines.pop()
if message == "CLOSE":
self.isConnected = False
return ''
if http_data == '':
self.isConnected = False
except Exception, e:
self.isConnected = False
http_data = ''
return http_data
def bye(self):
""" Sends message BYE to testserver. """
http_data = ''
try:
http_params = urllib.urlencode({"User" : self.username, "Message" : 'BYE', "Parameter" : 'None'})
http_data = self.__requestreply(http_params)
self.isConnected = False
except Exception, e:
self.isConnected = False
return ''
def hasConnection(self):
return self.isConnected
if __name__ == "__main__":
c = ToolProtocolHTTP()
print "init -> " + c.init()
print "getKeyword -> " + c.getKeyword()
print "putResult -> " + c.putResult(True)
print "getKeyword -> " + c.getKeyword()
print "putResult -> " + c.putResult(False)
print "invalid -> " + c.invalid()
print "bye -> " + c.bye()
| tema-mbt/tema-adapterlib | adapterlib/ToolProtocolHTTP.py | Python | mit | 8,508 | 0.012459 |
# encoding: utf-8
# module samba.dcerpc.drsuapi
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsuapi.so
# by generator 1.135
""" drsuapi DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class DsReplicaObjMetaData2Ctr(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
array = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
enumeration_context = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/drsuapi/DsReplicaObjMetaData2Ctr.py | Python | gpl-2.0 | 880 | 0.007955 |
'd'
def x():
print j
j = 0
def y():
for x in []:
print x
| lavjain/incubator-hawq | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test33.py | Python | apache-2.0 | 80 | 0.0375 |
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow op that scales gradient for backwards pass."""
from typing import Tuple
from sonnet.src import types
import tensorflow as tf
@tf.custom_gradient
def scale_gradient(
t: tf.Tensor, scale: types.FloatLike
) -> Tuple[tf.Tensor, types.GradFn]:
"""Scales gradients for the backwards pass.
Args:
t: A Tensor.
scale: The scale factor for the gradient on the backwards pass.
Returns:
A Tensor same as input, with scaled backward gradient.
"""
def grad(dy: tf.Tensor) -> Tuple[tf.Tensor, None]:
"""Scaled gradient."""
return scale * dy, None
return t, grad
| deepmind/sonnet | sonnet/src/scale_gradient.py | Python | apache-2.0 | 1,288 | 0.002329 |
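scale_gradient above is the identity on the forward pass and multiplies the incoming gradient by scale on the backward pass (returning None for the gradient with respect to scale). A small check of that behaviour with tf.GradientTape, assuming the function is re-exported as snt.scale_gradient; otherwise import it from sonnet.src.scale_gradient:

```python
import sonnet as snt
import tensorflow as tf

x = tf.constant([3.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = snt.scale_gradient(x, 0.5)   # forward pass: y == x

dy_dx = tape.gradient(y, x)
print(y.numpy())       # [3.]
print(dy_dx.numpy())   # [0.5]: the identity gradient scaled by 0.5
```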
##
## This copyrighted software is distributed under the GPL v2.0 license.
## See the LICENSE file for more details.
##
## Yeast workspace configuration file
import numpy as np
import WorkspaceModules.YeastApplicatorPlate
import WorkspaceModules.YeastArena
import WorkspaceModules.YeastArena3x3
YeastWorkspace = { 'baseThickness': 2.93, 'yeastApplicatorPlate': WorkspaceModules.YeastApplicatorPlate.YeastApplicatorPlate(422.0, 247),
'yeastArena': WorkspaceModules.YeastArena.YeastArena(285, 139),
'yeastArena3x3': WorkspaceModules.YeastArena3x3.YeastArena3x3(124, 36) }
| FlySorterLLC/SantaFeControlSoftware | Examples/ExampleYeastWorkspace.py | Python | gpl-2.0 | 585 | 0.018803 |
"""
Student Views
"""
import datetime
import logging
import uuid
import time
import json
import warnings
from collections import defaultdict
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.translation import ungettext
from django.utils.http import cookie_date, base36_to_int
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from requests import HTTPError
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile, PendingNameChange,
PendingEmailChange, CourseEnrollment, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import get_certificate_url, get_active_web_certificate # pylint: disable=import-error
from dark_lang.models import DarkLangConfig
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
import shoppingcart
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import commit_on_success_with_read_committed
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
set_logged_in_cookie, check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page
)
from student.models import anonymous_id_for_user
from xmodule.error_module import ErrorDescriptor
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
# The course selection work is done in courseware.courses.
domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN') # normally False
# do explicit check, because domain=None is valid
if domain is False:
domain = request.META.get('HTTP_HOST')
courses = get_courses(user, domain=domain)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
def cert_info(user, course, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course. Returns a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
"""
if not course.may_certify():
return {}
return _cert_info(user, course, certificate_status_for_student(user, course.id), course_mode)
def reverification_info(course_enrollment_pairs, user, statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in status_list
Args:
course_enrollment_pairs (list): list of (course, enrollment) tuples
user (User): the user whose information we want
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollment_pairs(user, course_org_filter, org_filter_out_set):
"""
Get the relevant set of (Course, CourseEnrollment) pairs to be displayed on
a student's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
store = modulestore()
with store.bulk_operations(enrollment.course_id):
course = store.get_course(enrollment.course_id)
if course and not isinstance(course, ErrorDescriptor):
# if we are in a Microsite, then filter out anything that is not
# attributed (by ORG) to that Microsite
if course_org_filter and course_org_filter != course.location.org:
continue
# Conversely, if we are not in a Microsite, then let's filter out any enrollments
# with courses attributed (by ORG) to Microsites
elif course.location.org in org_filter_out_set:
continue
yield (course, enrollment)
else:
log.error(
u"User %s enrolled in %s course %s",
user.username,
"broken" if course else "non-existent",
enrollment.course_id
)
def _cert_info(user, course, cert_status, course_mode):
"""
Implements the logic for cert_info -- split out for testing.
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
}
default_status = 'processing'
default_info = {'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing')
if course.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return None
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None
}
if (status in ('generating', 'ready', 'notpassing', 'restricted') and
course.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
# showing the certificate web view button if certificate is ready state and feature flags are enabled.
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
if get_active_web_certificate(course) is not None:
certificate_url = get_certificate_url(
user_id=user.id,
course_id=unicode(course.id),
verify_uuid=None
)
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': u'{url}'.format(url=certificate_url)
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
if linkedin_config.enabled:
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course.id,
course.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(msg) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_by_backend_name(running_pipeline.get('backend'))
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.NAME
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
We would like to compute some more information from the given course modes
and the user's current enrollment
Returns the given information:
- whether to show the course upsell information
- numbers of days until they can't upsell anymore
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already verified and if verified is an
# option
if 'verified' in modes and enrollment.mode != 'verified':
mode_info['show_upsell'] = True
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Check whether registration is blocked or not."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
# registration codes may be generated via Bulk Purchase Scenario
# we have to check only for the invoice generated registration codes
# that their invoice is valid or not
if redeemed_registration.invoice_item:
if not getattr(redeemed_registration.invoice_item.invoice, 'is_valid'):
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key
)
track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
break
return blocked
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
# for microsites, we want to filter and only show enrollments for courses within
# the microsites 'ORG'
course_org_filter = microsite.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
# remove our current Microsite from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollment_pairs = list(get_course_enrollment_pairs(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollment_pairs.sort(key=lambda x: x[1].created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [course.id for course, __ in course_enrollment_pairs]
all_course_modes, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollment_pairs, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
course.id for course, _enrollment in course_enrollment_pairs
if has_access(request.user, 'load', course)
and has_access(request.user, 'view_courseware_with_prerequisites', course)
)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
course.id: complete_course_mode_info(
course.id, enrollment,
modes=course_modes_by_course[course.id]
)
for course, enrollment in course_enrollment_pairs
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(
user,
course_enrollment_pairs,
all_course_modes
)
cert_statuses = {
course.id: cert_info(request.user, course, _enrollment.mode)
for course, _enrollment in course_enrollment_pairs
}
# only show email settings for Mongo course and when bulk email is turned on
show_email_settings_for = frozenset(
course.id for course, _enrollment in course_enrollment_pairs if (
settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
modulestore().get_modulestore_type(course.id) != ModuleStoreEnum.Type.xml and
CourseAuthorization.instructor_email_enabled(course.id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(course_enrollment_pairs, user, statuses)
show_refund_option_for = frozenset(course.id for course, _enrollment in course_enrollment_pairs
if _enrollment.refundable())
block_courses = frozenset(course.id for course, enrollment in course_enrollment_pairs
if is_course_blocked(request, CourseRegistrationCode.objects.filter(course_id=course.id, registrationcoderedemption__redeemed_by=request.user), course.id))
enrolled_courses_either_paid = frozenset(course.id for course, _enrollment in course_enrollment_pairs
if _enrollment.is_paid_course())
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(course.id for course, _enrollment in course_enrollment_pairs
if course.pre_requisite_courses)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
ccx_membership_triplets = []
if settings.FEATURES.get('CUSTOM_COURSES_EDX', False):
from ccx.utils import get_ccx_membership_triplets
ccx_membership_triplets = get_ccx_membership_triplets(
user, course_org_filter, org_filter_out_set
)
context = {
'enrollment_message': enrollment_message,
'course_enrollment_pairs': course_enrollment_pairs,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse(logout_user),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'ccx_membership_triplets': ccx_membership_triplets,
}
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollment_pairs, course_modes):
"""Builds a recent course enrollment message
Constructs a new message template based on any recent course enrollments for the student.
Args:
course_enrollment_pairs (list): A list of tuples containing courses, and the associated enrollment information.
        course_modes (dict): Mapping of course IDs to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollment_pairs)
if recently_enrolled_courses:
messages = [
{
"course_id": course.id,
"course_name": course.display_name,
"allow_donation": _allow_donation(course_modes, course.id, enrollment)
}
for course, enrollment in recently_enrolled_courses
]
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollment_pairs):
"""Checks to see if the student has recently enrolled in courses.
Checks to see if any of the enrollments in the course_enrollment_pairs have been recently created and activated.
Args:
course_enrollment_pairs (list): A list of tuples containing courses, and the associated enrollment information.
Returns:
        A list of (course, enrollment) tuples for enrollments that are active and recently created.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
(course, enrollment) for course, enrollment in course_enrollment_pairs
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
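# (Editorial illustration, not part of the original module.) Assuming the
# configured recent_enrollment_time_delta is 600 seconds, the filter above keeps
# only active enrollments created within the last ten minutes:
#
#     time_delta = datetime.datetime.now(UTC) - datetime.timedelta(seconds=600)
#     enrollment.created > time_delta   # True only inside the ten-minute window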
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
        course_modes (dict): Mapping of course IDs to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment (CourseEnrollment): The enrollment object in which the user is enrolled.
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return donations_enabled and enrollment.mode in course_modes[course_id] and course_modes[course_id][enrollment.mode].min_price == 0
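# (Editorial illustration, not part of the original module.) The donation check
# above is the conjunction of three conditions; for a hypothetical course whose
# enrollment mode is a free "honor" mode it evaluates as:
#
#     DonationConfiguration.current().enabled               # platform-wide switch
#     and 'honor' in course_modes[course_id]                # user's mode still offered
#     and course_modes[course_id]['honor'].min_price == 0   # and it is free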
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
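# (Editorial illustration, not part of the original module.) The opt-in value
# arrives as the literal strings 'true'/'false', so a hypothetical POST of
# {'email_opt_in': 'true'} records an opt-in, any other non-empty value records
# an opt-out, and a missing field leaves the preference untouched.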
@require_POST
@commit_on_success_with_read_committed
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
            The default is True; passing False is intended only for legacy code or
            code with non-standard flows (e.g. beta tester invitations). Any
            standard enrollment flow should leave this check enabled.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
# We don't do this check on unenroll, or a bad course id can't be unenrolled from
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (honor)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "honor".
try:
CourseEnrollment.enroll(user, course_id, check_access=check_access)
except Exception:
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
if not CourseEnrollment.is_enrolled(user, course_id):
return HttpResponseBadRequest(_("You are not enrolled in this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
@never_cache
@ensure_csrf_cookie
def accounts_login(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
redirect_to = get_next_url_for_login_page(request)
context = {
'login_redirect_url': redirect_to,
'pipeline_running': 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': settings.PLATFORM_NAME,
}
return render_to_response('login.html', context)
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable-msg=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
requested_provider = provider.Registry.get_by_backend_name(backend_name)
try:
user = pipeline.get_authenticated_user(username, backend_name)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
username=username, backend_name=backend_name))
return HttpResponse(
_("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME
)
+ "<br/><br/>" +
_("Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard.").format(
platform_name=settings.PLATFORM_NAME, provider_name=requested_provider.NAME
)
+ "<br/><br/>" +
_("If you don't have an {platform_name} account yet, click <strong>Register Now</strong> at the top of the page.").format(
platform_name=settings.PLATFORM_NAME
),
content_type="text/plain",
status=403
)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
"value": _('There was an error receiving your login information. Please email us.'), # TODO: User error message
}) # TODO: this should be status code 400 # pylint: disable=fixme
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': email,
'username': username,
})
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
redirect_url = None # The AJAX method calling should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookie(request, response)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@ensure_csrf_cookie
def logout_user(request):
"""
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged in state of the user
"""
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
logout(request)
if settings.FEATURES.get('AUTH_USE_CAS'):
target = reverse('cas-logout')
else:
target = '/'
response = redirect(target)
response.delete_cookie(
settings.EDXMKTG_COOKIE_NAME,
path='/', domain=settings.SESSION_COOKIE_DOMAIN,
)
return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.all()[0].changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
handler that saves the user Signup Source
when the user is created
"""
if 'created' in kwargs and kwargs['created']:
site = microsite.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
if not form.is_valid():
raise ValidationError(form.errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
user.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a basic check that
      they are well-formed.
    * It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow for microsites to define their own set of required/optional/hidden fields
extra_fields = microsite.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = microsite.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
tos_required = (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
# Perform operations within a transaction that are critical to account creation
with transaction.commit_on_success():
# first, create the account
(user, profile, registration) = _do_create_account(form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception:
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# Track the user's registration
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(user.id, {
'email': user.email,
'username': user.username,
})
# If the user is registering via 3rd party auth, track which provider they use
provider_name = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_by_backend_name(running_pipeline.get('backend'))
provider_name = current_provider.NAME
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': provider_name
},
context={
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
#
# Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'))
)
if send_email:
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
def set_marketing_cookie(request, response):
"""
Set the login cookie for the edx marketing site on the given response. Its
expiration will match that of the given request's session.
"""
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# we want this cookie to be accessed via javascript
# so httponly is set to None
response.set_cookie(
settings.EDXMKTG_COOKIE_NAME,
'true',
max_age=max_age,
expires=expires,
domain=settings.SESSION_COOKIE_DOMAIN,
path='/',
secure=None,
httponly=None
)
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
redirect_url = None # The AJAX method calling should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_marketing_cookie(request, response)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
course_id = request.GET.get('course_id', None)
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except AccountValidationError:
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response
if request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
# Enroll student in any pending courses he/she may have if auto_enroll flag is set
student = User.objects.filter(id=regs[0].user_id)
if student:
ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
# enroll student in any pending CCXs he/she may have if auto_enroll flag is set
if settings.FEATURES.get('CUSTOM_COURSES_EDX'):
from ccx.models import CcxMembership, CcxFutureMembership
ccxfms = CcxFutureMembership.objects.filter(
email=student[0].email
)
for ccxfm in ccxfms:
if ccxfm.auto_enroll:
CcxMembership.auto_enroll(student[0], ccxfm)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=settings.DEFAULT_FROM_EMAIL,
request=request,
domain_override=request.get_host())
# When password change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
            except ValidationError as err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
        # also, check to see if passwords are getting reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
        # We have a password reset attempt which violates some security policy; use the
        # existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': settings.PLATFORM_NAME,
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": settings.PLATFORM_NAME}
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb36=uidb36, token=token, extra_context=extra_context
)
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
    Given a new email for a user, performs some basic validation of the new address. If any issues are
    encountered during validation, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
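# (Editorial illustration, not part of the original module.) A minimal sketch of
# how a caller might drive the validator above; the request object is assumed:
#
#     try:
#         validate_new_email(request.user, new_email)
#     except ValueError as err:
#         return JsonResponse({'success': False, 'error': err.message}, status=400)
#     do_email_change_request(request.user, new_email)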
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
    # if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
# When the email address change is complete, a "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
@transaction.commit_manually
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the address to the new one.
"""
try:
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.rollback()
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.rollback()
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.rollback()
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.rollback()
return response
response = render_to_response("email_change_successful.html", address_context)
transaction.commit()
return response
except Exception: # pylint: disable=broad-except
# If we get an unexpected exception, be sure to rollback the transaction
transaction.rollback()
raise
# TODO: DELETE AFTER NEW ACCOUNT PAGE DONE
@ensure_csrf_cookie
@require_POST
def change_name_request(request):
""" Log a request for a new name. """
if not request.user.is_authenticated():
raise Http404
try:
pnc = PendingNameChange.objects.get(user=request.user.id)
except PendingNameChange.DoesNotExist:
pnc = PendingNameChange()
pnc.user = request.user
pnc.new_name = request.POST['new_name'].strip()
pnc.rationale = request.POST['rationale']
if len(pnc.new_name) < 2:
return JsonResponse({
"success": False,
"error": _('Name required'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
pnc.save()
# The following automatically accepts name change requests. Remove this to
# go back to the old system where it gets queued up for admin approval.
accept_name_change_by_id(pnc.id)
return JsonResponse({"success": True})
# TODO: DELETE AFTER NEW ACCOUNT PAGE DONE
def accept_name_change_by_id(uid):
"""
Accepts the pending name change request for the user represented
by user id `uid`.
"""
try:
pnc = PendingNameChange.objects.get(id=uid)
except PendingNameChange.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('Invalid ID'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
user = pnc.user
u_prof = UserProfile.objects.get(user=user)
# Save old name
meta = u_prof.get_meta()
if 'old_names' not in meta:
meta['old_names'] = []
meta['old_names'].append([u_prof.name, pnc.rationale, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.name = pnc.new_name
u_prof.save()
pnc.delete()
return JsonResponse({"success": True})
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id
)
track.views.server_track(request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard')
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id
)
track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')
return JsonResponse({"success": True})
| kamalx/edx-platform | common/djangoapps/student/views.py | Python | agpl-3.0 | 88,916 | 0.002553 |
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import xml
from tests.tools import *
from azurelinuxagent.common.protocol.wire import *
from azurelinuxagent.common.osutil import get_osutil
class TestRemoteAccess(AgentTestCase):
def test_parse_remote_access(self):
data_str = load_data('wire/remote_access_single_account.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals("1", remote_access.incarnation)
self.assertEquals(1, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
return_value=GoalState(load_data('wire/goal_state.xml')))
def test_update_remote_access_conf_no_remote_access(self, _):
protocol = WireProtocol('12.34.56.78')
goal_state = protocol.client.get_goal_state()
protocol.client.update_remote_access_conf(goal_state)
def test_parse_two_remote_access_accounts(self):
data_str = load_data('wire/remote_access_two_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals("1", remote_access.incarnation)
self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount1", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
self.assertEquals("testAccount2", remote_access.user_list.users[1].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")
def test_parse_ten_remote_access_accounts(self):
data_str = load_data('wire/remote_access_10_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(10, len(remote_access.user_list.users), "User count does not match.")
def test_parse_duplicate_remote_access_accounts(self):
data_str = load_data('wire/remote_access_duplicate_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(2, len(remote_access.user_list.users), "User count does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[0].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.")
self.assertEquals("testAccount", remote_access.user_list.users[1].name, "Account name does not match")
self.assertEquals("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.")
self.assertEquals("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.")
def test_parse_zero_remote_access_accounts(self):
data_str = load_data('wire/remote_access_no_accounts.xml')
remote_access = RemoteAccess(data_str)
self.assertNotEquals(None, remote_access)
self.assertEquals(0, len(remote_access.user_list.users), "User count does not match.")
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_goal_state',
return_value=GoalState(load_data('wire/goal_state_remote_access.xml')))
@patch('azurelinuxagent.common.protocol.wire.WireClient.fetch_config',
return_value=load_data('wire/remote_access_single_account.xml'))
@patch('azurelinuxagent.common.protocol.wire.WireClient.get_header_for_cert')
def test_update_remote_access_conf_remote_access(self, _1, _2, _3):
protocol = WireProtocol('12.34.56.78')
goal_state = protocol.client.get_goal_state()
protocol.client.update_remote_access_conf(goal_state)
self.assertNotEquals(None, protocol.client.remote_access)
self.assertEquals(1, len(protocol.client.remote_access.user_list.users))
self.assertEquals('testAccount', protocol.client.remote_access.user_list.users[0].name)
self.assertEquals('encryptedPasswordString', protocol.client.remote_access.user_list.users[0].encrypted_password)
def test_parse_bad_remote_access_data(self):
data = "foobar"
self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data)
|
hglkrijger/WALinuxAgent
|
tests/ga/test_remoteaccess.py
|
Python
|
apache-2.0
| 5,846
| 0.005132
|
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from lib.l10n_utils.dotlang import _, _lazy
from bedrock.mozorg.forms import HoneyPotWidget
FRAUD_REPORT_FILE_SIZE_LIMIT = 5242880 # 5MB
class FraudReportForm(forms.Form):
input_url = forms.URLField(
max_length=40,
required=True,
error_messages={
'required': _lazy(u'Please enter a URL.'),
},
widget=forms.TextInput(
attrs={
'size': 40,
'placeholder': _lazy(u'http://offendingsite.com'),
'class': 'required fill-width',
'required': 'required',
'aria-required': 'true',
}
)
)
input_category = forms.ChoiceField(
choices=(
('Charging for software', _lazy(u'Charging for software')),
('Collecting personal information', _lazy(u'Collecting personal information')),
('Domain name violation', _lazy(u'Domain name violation')),
('Logo misuse/modification', _lazy(u'Logo misuse/modification')),
('Distributing modified Firefox/malware', _lazy(u'Distributing modified Firefox/malware')),
),
required=True,
error_messages={
'required': _lazy('Please select a category.'),
},
widget=forms.Select(
attrs={
'title': _lazy(u'Category'),
'class': 'required',
'required': 'required',
'aria-required': 'true',
}
)
)
input_product = forms.ChoiceField(
choices=(
('Firefox', _lazy(u'Firefox')),
('SeaMonkey', _lazy(u'SeaMonkey')),
('Thunderbird', _lazy(u'Thunderbird')),
('Other Mozilla Product/Project', _lazy(u'Other Mozilla Product/Project (specify)')),
),
required=True,
error_messages={
'required': _lazy('Please select a product.'),
},
widget=forms.Select(
attrs={
'title': _lazy(u'Product'),
'class': 'required',
'required': 'required',
'aria-required': 'true',
}
)
)
input_specific_product = forms.CharField(
max_length=80,
required=False,
widget=forms.TextInput(
attrs={
'size': 20,
'class': 'fill-width'
}
)
)
input_details = forms.CharField(
required=False,
widget=forms.Textarea(
attrs={
'rows': '',
'cols': '',
'class': 'fill-width'
}
)
)
input_attachment = forms.FileField(
required=False,
)
input_attachment_desc = forms.CharField(
max_length=40,
required=False,
widget=forms.Textarea(
attrs={
'rows': '',
'cols': '',
'class': 'fill-width'
}
)
)
input_email = forms.EmailField(
max_length=80,
required=False,
error_messages={
'invalid': _lazy(u'Please enter a valid email address'),
},
widget=forms.TextInput(
attrs={
'size': 20,
'class': 'fill-width'
}
)
)
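    # honeypot field: hidden from real users, so any submitted value flags the request as automated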
superpriority = forms.BooleanField(widget=HoneyPotWidget, required=False)
def clean_input_attachment(self):
cleaned_data = super(FraudReportForm, self).clean()
attachment = cleaned_data.get("input_attachment")
if attachment:
if attachment._size > FRAUD_REPORT_FILE_SIZE_LIMIT:
raise forms.ValidationError(
_("Attachment must not exceed 5MB"))
return attachment
def clean_superpriority(self):
cleaned_data = super(FraudReportForm, self).clean()
honeypot = cleaned_data.pop('superpriority', None)
if honeypot:
raise forms.ValidationError(
_('Your submission could not be processed'))
|
mmmavis/lightbeam-bedrock-website
|
bedrock/legal/forms.py
|
Python
|
mpl-2.0
| 4,294
| 0.000699
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import time
import urllib
import re
import threading
import datetime
import random
import locale
from Cheetah.Template import Template
import cherrypy.lib
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import scene_exceptions
from sickbeard import naming
from sickbeard import subtitles
from sickbeard.providers import newznab
from sickbeard.common import Quality, Overview, statusStrings
from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED
from sickbeard.exceptions import ex
from sickbeard.webapi import Api
from lib.tvdb_api import tvdb_api
from lib.dateutil import tz
import network_timezones
import subliminal
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import browser
class PageTemplate (Template):
def __init__(self, *args, **KWs):
KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file'])
super(PageTemplate, self).__init__(*args, **KWs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
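        # work out which host we were reached on; bracketed IPv6 literals need special handling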
if cherrypy.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
self.projectHomePage = "http://code.google.com/p/sickbeard/"
if sickbeard.NZBS and sickbeard.NZBS_UID and sickbeard.NZBS_HASH:
logger.log(u"NZBs.org has been replaced, please check the config to configure the new provider!", logger.ERROR)
ui.notifications.error("NZBs.org Config Update", "NZBs.org has a new site. Please <a href=\""+sickbeard.WEB_ROOT+"/config/providers\">update your config</a> with the api key from <a href=\"http://nzbs.org/login\">http://nzbs.org</a> and then disable the old NZBs.org provider.")
if "X-Forwarded-Host" in cherrypy.request.headers:
self.sbHost = cherrypy.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in cherrypy.request.headers:
self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port']
self.sbHttpsPort = self.sbHttpPort
if "X-Forwarded-Proto" in cherrypy.request.headers:
self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' ('+str(len(classes.ErrorViewer.errors))+')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{ 'title': 'Home', 'key': 'home' },
{ 'title': 'Coming Episodes', 'key': 'comingEpisodes' },
{ 'title': 'History', 'key': 'history' },
{ 'title': 'Manage', 'key': 'manage' },
{ 'title': 'Config', 'key': 'config' },
{ 'title': logPageTitle, 'key': 'errorlogs' },
]
def redirect(abspath, *args, **KWs):
assert abspath[0] == '/'
raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs)
class TVDBWebUI:
def __init__(self, config, log=None):
self.config = config
self.log = log
def selectSeries(self, allSeries):
searchList = ",".join([x['id'] for x in allSeries])
showDirList = ""
for curShowDir in self.config['_showDir']:
showDirList += "showDir="+curShowDir+"&"
redirect("/home/addShows/addShow?" + showDirList + "seriesList=" + searchList)
def _munge(string):
return unicode(string).encode('utf-8', 'xmlcharrefreplace')
def _genericMessage(subject, message):
t = PageTemplate(file="genericMessage.tmpl")
t.submenu = HomeMenu()
t.subject = subject
t.message = message
return _munge(t)
def _getEpisode(show, season, episode):
if show == None or season == None or episode == None:
return "Invalid parameters"
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return "Show not in show list"
epObj = showObj.getEpisode(int(season), int(episode))
if epObj == None:
return "Episode couldn't be retrieved"
return epObj
ManageMenu = [
{ 'title': 'Backlog Overview', 'path': 'manage/backlogOverview' },
{ 'title': 'Manage Searches', 'path': 'manage/manageSearches' },
{ 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses' },
]
if sickbeard.USE_SUBTITLES:
    ManageMenu.append({ 'title': 'Manage Missed Subtitles', 'path': 'manage/subtitleMissed' })
class ManageSearches:
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage_manageSearches.tmpl")
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable
redirect("/manage/manageSearches")
@cherrypy.expose
def forceVersionCheck(self):
# force a check to see if there is a new version
result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) #@UndefinedVariable
if result:
logger.log(u"Forcing version check")
redirect("/manage/manageSearches")
class Manage:
manageSearches = ManageSearches()
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage.tmpl")
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def showEpisodeStatuses(self, tvdb_id, whichStatus):
myDB = db.DBConnection()
status_list = [int(whichStatus)]
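        # a plain SNATCHED status stands in for every snatched quality variant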
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
cur_show_results = myDB.select("SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN ("+','.join(['?']*len(status_list))+")", [int(tvdb_id)] + status_list)
result = {}
for cur_result in cur_show_results:
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
result[cur_season][cur_episode] = cur_result["name"]
return json.dumps(result)
@cherrypy.expose
def episodeStatuses(self, whichStatus=None):
if whichStatus:
whichStatus = int(whichStatus)
status_list = [whichStatus]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
else:
status_list = []
t = PageTemplate(file="manage_episodeStatuses.tmpl")
t.submenu = ManageMenu
t.whichStatus = whichStatus
# if we have no status then this is as far as we need to go
if not status_list:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name", status_list)
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs):
status_list = [int(oldStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
to_change = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_change:
to_change[tvdb_id] = []
to_change[tvdb_id].append(what)
myDB = db.DBConnection()
for cur_tvdb_id in to_change:
# get a list of all the eps we want to change if they just said "all"
if 'all' in to_change[cur_tvdb_id]:
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND showid = ?", status_list + [cur_tvdb_id])
all_eps = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
to_change[cur_tvdb_id] = all_eps
Home().setStatus(cur_tvdb_id, '|'.join(to_change[cur_tvdb_id]), newStatus, direct=True)
redirect('/manage/episodeStatuses')
@cherrypy.expose
def showSubtitleMissed(self, tvdb_id, whichSubs):
myDB = db.DBConnection()
cur_show_results = myDB.select("SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? AND season != 0 AND status LIKE '%4'", [int(tvdb_id)])
result = {}
for cur_result in cur_show_results:
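            # skip episodes that already have the requested language (or all wanted languages when 'all' is selected)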
if whichSubs == 'all':
if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_result["subtitles"].split(','):
continue
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
if cur_episode not in result[cur_season]:
result[cur_season][cur_episode] = {}
result[cur_season][cur_episode]["name"] = cur_result["name"]
result[cur_season][cur_episode]["subtitles"] = ",".join(subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not cur_result["subtitles"] == '' else ''
return json.dumps(result)
@cherrypy.expose
def subtitleMissed(self, whichSubs=None):
t = PageTemplate(file="manage_subtitleMissed.tmpl")
t.submenu = ManageMenu
t.whichSubs = whichSubs
if not whichSubs:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name")
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
if whichSubs == 'all':
if len(set(cur_status_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_status_result["subtitles"].split(','):
continue
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def downloadSubtitleMissed(self, *args, **kwargs):
to_download = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_download:
to_download[tvdb_id] = []
to_download[tvdb_id].append(what)
for cur_tvdb_id in to_download:
# get a list of all the eps we want to download subtitles if they just said "all"
if 'all' in to_download[cur_tvdb_id]:
myDB = db.DBConnection()
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?", [cur_tvdb_id])
to_download[cur_tvdb_id] = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
for epResult in to_download[cur_tvdb_id]:
                season, episode = epResult.split('x')
show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_tvdb_id))
subtitles = show.getEpisode(int(season), int(episode)).downloadSubtitles()
redirect('/manage/subtitleMissed')
@cherrypy.expose
def backlogShow(self, tvdb_id):
show_obj = helpers.findCertainShow(sickbeard.showList, int(tvdb_id))
if show_obj:
sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) #@UndefinedVariable
redirect("/manage/backlogOverview")
@cherrypy.expose
def backlogOverview(self):
t = PageTemplate(file="manage_backlogOverview.tmpl")
t.submenu = ManageMenu
myDB = db.DBConnection()
showCounts = {}
showCats = {}
showSQLResults = {}
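        # build per-show episode counts and per-episode categories for the backlog overview template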
for curShow in sickbeard.showList:
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid])
for curResult in sqlResults:
curEpCat = curShow.getOverview(int(curResult["status"]))
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
showCounts[curShow.tvdbid] = epCounts
showCats[curShow.tvdbid] = epCats
showSQLResults[curShow.tvdbid] = sqlResults
t.showCounts = showCounts
t.showCats = showCats
t.showSQLResults = showSQLResults
return _munge(t)
@cherrypy.expose
def massEdit(self, toEdit=None):
t = PageTemplate(file="manage_massEdit.tmpl")
t.submenu = ManageMenu
if not toEdit:
redirect("/manage")
showIDs = toEdit.split("|")
showList = []
for curID in showIDs:
curID = int(curID)
showObj = helpers.findCertainShow(sickbeard.showList, curID)
if showObj:
showList.append(showObj)
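        # work out, per attribute, whether every selected show shares the same value so the
        # edit form can pre-fill it (None means the selected shows differ)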
flatten_folders_all_same = True
last_flatten_folders = None
paused_all_same = True
last_paused = None
frenched_all_same = True
last_frenched = None
quality_all_same = True
last_quality = None
subtitles_all_same = True
last_subtitles = None
lang_all_same = True
last_lang_metadata= None
lang_audio_all_same = True
last_lang_audio = None
root_dir_list = []
for curShow in showList:
cur_root_dir = ek.ek(os.path.dirname, curShow._location)
if cur_root_dir not in root_dir_list:
root_dir_list.append(cur_root_dir)
# if we know they're not all the same then no point even bothering
if paused_all_same:
# if we had a value already and this value is different then they're not all the same
if last_paused not in (curShow.paused, None):
paused_all_same = False
else:
last_paused = curShow.paused
if frenched_all_same:
# if we had a value already and this value is different then they're not all the same
if last_frenched not in (curShow.frenchsearch, None):
frenched_all_same = False
else:
last_frenched = curShow.frenchsearch
if flatten_folders_all_same:
if last_flatten_folders not in (None, curShow.flatten_folders):
flatten_folders_all_same = False
else:
last_flatten_folders = curShow.flatten_folders
if quality_all_same:
if last_quality not in (None, curShow.quality):
quality_all_same = False
else:
last_quality = curShow.quality
if subtitles_all_same:
if last_subtitles not in (None, curShow.subtitles):
subtitles_all_same = False
else:
last_subtitles = curShow.subtitles
if lang_all_same:
if last_lang_metadata not in (None, curShow.lang):
lang_all_same = False
else:
last_lang_metadata = curShow.lang
if lang_audio_all_same:
if last_lang_audio not in (None, curShow.audio_lang):
lang_audio_all_same = False
else:
last_lang_audio = curShow.audio_lang
t.showList = toEdit
t.paused_value = last_paused if paused_all_same else None
t.frenched_value = last_frenched if frenched_all_same else None
t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
t.quality_value = last_quality if quality_all_same else None
t.subtitles_value = last_subtitles if subtitles_all_same else None
t.root_dir_list = root_dir_list
t.lang_value = last_lang_metadata if lang_all_same else None
t.audio_value = last_lang_audio if lang_audio_all_same else None
return _munge(t)
@cherrypy.expose
def massEditSubmit(self, paused=None, frenched=None, flatten_folders=None, quality_preset=False, subtitles=None,
anyQualities=[], bestQualities=[], tvdbLang=None, audioLang = None, toEdit=None, *args, **kwargs):
dir_map = {}
for cur_arg in kwargs:
if not cur_arg.startswith('orig_root_dir_'):
continue
which_index = cur_arg.replace('orig_root_dir_', '')
end_dir = kwargs['new_root_dir_'+which_index]
dir_map[kwargs[cur_arg]] = end_dir
showIDs = toEdit.split("|")
errors = []
for curShow in showIDs:
curErrors = []
showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
if not showObj:
continue
cur_root_dir = ek.ek(os.path.dirname, showObj._location)
cur_show_dir = ek.ek(os.path.basename, showObj._location)
if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
logger.log(u"For show "+showObj.name+" changing dir from "+showObj._location+" to "+new_show_dir)
else:
new_show_dir = showObj._location
if paused == 'keep':
new_paused = showObj.paused
else:
new_paused = True if paused == 'enable' else False
new_paused = 'on' if new_paused else 'off'
if frenched == 'keep':
new_frenched = showObj.frenchsearch
else:
new_frenched = True if frenched == 'enable' else False
new_frenched = 'on' if new_frenched else 'off'
if flatten_folders == 'keep':
new_flatten_folders = showObj.flatten_folders
else:
new_flatten_folders = True if flatten_folders == 'enable' else False
new_flatten_folders = 'on' if new_flatten_folders else 'off'
if subtitles == 'keep':
new_subtitles = showObj.subtitles
else:
new_subtitles = True if subtitles == 'enable' else False
new_subtitles = 'on' if new_subtitles else 'off'
if quality_preset == 'keep':
anyQualities, bestQualities = Quality.splitQuality(showObj.quality)
if tvdbLang == 'None':
new_lang = 'en'
else:
new_lang = tvdbLang
if audioLang == 'keep':
                new_audio_lang = showObj.audio_lang
else:
new_audio_lang = audioLang
exceptions_list = []
curErrors += Home().editShow(curShow, new_show_dir, anyQualities, bestQualities, exceptions_list, new_flatten_folders, new_paused, new_frenched, subtitles=new_subtitles, tvdbLang=new_lang, audio_lang=new_audio_lang, directCall=True)
if curErrors:
logger.log(u"Errors: "+str(curErrors), logger.ERROR)
errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(['<li>%s</li>' % error for error in curErrors]) + "</ul>")
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
" ".join(errors))
redirect("/manage")
@cherrypy.expose
def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toMetadata=None, toSubtitle=None):
if toUpdate != None:
toUpdate = toUpdate.split('|')
else:
toUpdate = []
if toRefresh != None:
toRefresh = toRefresh.split('|')
else:
toRefresh = []
if toRename != None:
toRename = toRename.split('|')
else:
toRename = []
if toSubtitle != None:
toSubtitle = toSubtitle.split('|')
else:
toSubtitle = []
if toDelete != None:
toDelete = toDelete.split('|')
else:
toDelete = []
if toMetadata != None:
toMetadata = toMetadata.split('|')
else:
toMetadata = []
errors = []
refreshes = []
updates = []
renames = []
subtitles = []
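        # handle each selected show once, applying every action that was requested for it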
for curShowID in set(toUpdate+toRefresh+toRename+toSubtitle+toDelete+toMetadata):
if curShowID == '':
continue
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))
if showObj == None:
continue
if curShowID in toDelete:
showObj.deleteShow()
# don't do anything else if it's being deleted
continue
if curShowID in toUpdate:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
updates.append(showObj.name)
except exceptions.CantUpdateException, e:
errors.append("Unable to update show "+showObj.name+": "+ex(e))
# don't bother refreshing shows that were updated anyway
if curShowID in toRefresh and curShowID not in toUpdate:
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
refreshes.append(showObj.name)
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh show "+showObj.name+": "+ex(e))
if curShowID in toRename:
sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj) #@UndefinedVariable
renames.append(showObj.name)
if curShowID in toSubtitle:
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj) #@UndefinedVariable
subtitles.append(showObj.name)
if len(errors) > 0:
ui.notifications.error("Errors encountered",
                                   '<br />\n'.join(errors))
messageDetail = ""
if len(updates) > 0:
messageDetail += "<br /><b>Updates</b><br /><ul><li>"
messageDetail += "</li><li>".join(updates)
messageDetail += "</li></ul>"
if len(refreshes) > 0:
messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
messageDetail += "</li><li>".join(refreshes)
messageDetail += "</li></ul>"
if len(renames) > 0:
messageDetail += "<br /><b>Renames</b><br /><ul><li>"
messageDetail += "</li><li>".join(renames)
messageDetail += "</li></ul>"
if len(subtitles) > 0:
messageDetail += "<br /><b>Subtitles</b><br /><ul><li>"
messageDetail += "</li><li>".join(subtitles)
messageDetail += "</li></ul>"
if len(updates+refreshes+renames+subtitles) > 0:
ui.notifications.message("The following actions were queued:",
messageDetail)
redirect("/manage")
class History:
@cherrypy.expose
def index(self, limit=100):
myDB = db.DBConnection()
# sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.tvdb_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage))
if limit == "0":
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC")
else:
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC LIMIT ?", [limit])
t = PageTemplate(file="history.tmpl")
t.historyResults = sqlResults
t.limit = limit
t.submenu = [
{ 'title': 'Clear History', 'path': 'history/clearHistory' },
{ 'title': 'Trim History', 'path': 'history/trimHistory' },
            { 'title': 'Truncate Episode Links', 'path': 'history/truncEplinks' },
            { 'title': 'Truncate Processed Files List', 'path': 'history/truncEpListProc' },
]
return _munge(t)
@cherrypy.expose
def clearHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE 1=1")
ui.notifications.message('History cleared')
redirect("/history")
@cherrypy.expose
def trimHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE date < "+str((datetime.datetime.today()-datetime.timedelta(days=30)).strftime(history.dateFormat)))
ui.notifications.message('Removed history entries greater than 30 days old')
redirect("/history")
@cherrypy.expose
def truncEplinks(self):
myDB = db.DBConnection()
        nbep = myDB.select("SELECT count(*) from episode_links")
        myDB.action("DELETE FROM episode_links WHERE 1=1")
        messnum = str(nbep[0][0]) + ' episode links deleted'
        ui.notifications.message('All Episode Links Removed', messnum)
redirect("/history")
@cherrypy.expose
def truncEpListProc(self):
myDB = db.DBConnection()
        nbep = myDB.select("SELECT count(*) from processed_files")
        myDB.action("DELETE FROM processed_files WHERE 1=1")
        messnum = str(nbep[0][0]) + ' processed file records deleted'
        ui.notifications.message('Cleared list of processed files', messnum)
redirect("/history")
ConfigMenu = [
{ 'title': 'General', 'path': 'config/general/' },
{ 'title': 'Search Settings', 'path': 'config/search/' },
{ 'title': 'Search Providers', 'path': 'config/providers/' },
{ 'title': 'Subtitles Settings','path': 'config/subtitles/' },
{ 'title': 'Post Processing', 'path': 'config/postProcessing/' },
{ 'title': 'Notifications', 'path': 'config/notifications/' },
]
class ConfigGeneral:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_general.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveRootDirs(self, rootDirString=None):
sickbeard.ROOT_DIRS = rootDirString
sickbeard.save_config()
@cherrypy.expose
def saveAddShowDefaults(self, defaultFlattenFolders, defaultStatus, anyQualities, bestQualities, audio_lang, subtitles=None):
if anyQualities:
anyQualities = anyQualities.split(',')
else:
anyQualities = []
if bestQualities:
bestQualities = bestQualities.split(',')
else:
bestQualities = []
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
sickbeard.STATUS_DEFAULT = int(defaultStatus)
sickbeard.QUALITY_DEFAULT = int(newQuality)
sickbeard.AUDIO_SHOW_DEFAULT = str(audio_lang)
if defaultFlattenFolders == "true":
defaultFlattenFolders = 1
else:
defaultFlattenFolders = 0
sickbeard.FLATTEN_FOLDERS_DEFAULT = int(defaultFlattenFolders)
if subtitles == "true":
subtitles = 1
else:
subtitles = 0
sickbeard.SUBTITLES_DEFAULT = int(subtitles)
sickbeard.save_config()
@cherrypy.expose
def generateKey(self):
""" Return a new randomized API_KEY
"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
logger.log(u"New API generated")
return m.hexdigest()
@cherrypy.expose
def saveGeneral(self, log_dir=None, web_port=None, web_log=None, web_ipv6=None,
update_shows_on_start=None,launch_browser=None, web_username=None, use_api=None, api_key=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None, sort_article=None, french_column=None):
results = []
if web_ipv6 == "on":
web_ipv6 = 1
else:
web_ipv6 = 0
if web_log == "on":
web_log = 1
else:
web_log = 0
if launch_browser == "on":
launch_browser = 1
else:
launch_browser = 0
if update_shows_on_start == "on":
update_shows_on_start = 1
else:
update_shows_on_start = 0
if sort_article == "on":
sort_article = 1
else:
sort_article = 0
if french_column == "on":
french_column = 1
else:
            french_column = 0
if version_notify == "on":
version_notify = 1
else:
version_notify = 0
if not config.change_LOG_DIR(log_dir):
results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log dir not changed."]
sickbeard.UPDATE_SHOWS_ON_START = update_shows_on_start
sickbeard.LAUNCH_BROWSER = launch_browser
sickbeard.SORT_ARTICLE = sort_article
sickbeard.FRENCH_COLUMN = french_column
sickbeard.WEB_PORT = int(web_port)
sickbeard.WEB_IPV6 = web_ipv6
sickbeard.WEB_LOG = web_log
sickbeard.WEB_USERNAME = web_username
sickbeard.WEB_PASSWORD = web_password
if use_api == "on":
use_api = 1
else:
use_api = 0
sickbeard.USE_API = use_api
sickbeard.API_KEY = api_key
if enable_https == "on":
enable_https = 1
else:
enable_https = 0
sickbeard.ENABLE_HTTPS = enable_https
if not config.change_HTTPS_CERT(https_cert):
results += ["Unable to create directory " + os.path.normpath(https_cert) + ", https cert dir not changed."]
if not config.change_HTTPS_KEY(https_key):
results += ["Unable to create directory " + os.path.normpath(https_key) + ", https key dir not changed."]
config.change_VERSION_NOTIFY(version_notify)
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/general/")
class ConfigSearch:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_search.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_password=None, nzbget_category=None, nzbget_host=None,
torrent_dir=None,torrent_method=None, nzb_method=None, usenet_retention=None, search_frequency=None, french_delay=None,
download_propers=None, download_french=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_path=None, torrent_custom_url=None, torrent_ratio=None, torrent_paused=None, ignore_words=None,
prefered_method=None, torrent_use_ftp = None, ftp_host=None, ftp_port=None, ftp_timeout=None, ftp_passive = None, ftp_login=None,
ftp_password=None, ftp_remotedir=None):
results = []
if not config.change_NZB_DIR(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_TORRENT_DIR(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_SEARCH_FREQUENCY(search_frequency)
if download_propers == "on":
download_propers = 1
else:
download_propers = 0
if download_french == "on":
download_french = 1
else:
download_french = 0
if use_nzbs == "on":
use_nzbs = 1
else:
use_nzbs = 0
if use_torrents == "on":
use_torrents = 1
else:
use_torrents = 0
if usenet_retention == None:
usenet_retention = 200
if french_delay == None:
french_delay = 120
if ignore_words == None:
ignore_words = ""
if ftp_port == None:
ftp_port = 21
if ftp_timeout == None:
ftp_timeout = 120
sickbeard.USE_NZBS = use_nzbs
sickbeard.USE_TORRENTS = use_torrents
sickbeard.NZB_METHOD = nzb_method
sickbeard.PREFERED_METHOD = prefered_method
sickbeard.TORRENT_METHOD = torrent_method
sickbeard.USENET_RETENTION = int(usenet_retention)
sickbeard.FRENCH_DELAY = int(french_delay)
sickbeard.IGNORE_WORDS = ignore_words
sickbeard.DOWNLOAD_PROPERS = download_propers
sickbeard.DOWNLOAD_FRENCH = download_french
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
sickbeard.SAB_APIKEY = sab_apikey.strip()
sickbeard.SAB_CATEGORY = sab_category
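        # normalise the SABnzbd host into a full URL ending with a slash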
if sab_host and not re.match('https?://.*', sab_host):
sab_host = 'http://' + sab_host
        if sab_host and not sab_host.endswith('/'):
            sab_host = sab_host + '/'
sickbeard.SAB_HOST = sab_host
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = nzbget_host
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
sickbeard.TORRENT_LABEL = torrent_label
sickbeard.TORRENT_PATH = torrent_path
if torrent_custom_url == "on":
torrent_custom_url = 1
else:
torrent_custom_url = 0
sickbeard.TORRENT_CUSTOM_URL = torrent_custom_url
sickbeard.TORRENT_RATIO = torrent_ratio
if torrent_paused == "on":
torrent_paused = 1
else:
torrent_paused = 0
sickbeard.TORRENT_PAUSED = torrent_paused
if torrent_host and not re.match('https?://.*', torrent_host):
torrent_host = 'http://' + torrent_host
        if torrent_host and not torrent_host.endswith('/'):
            torrent_host = torrent_host + '/'
sickbeard.TORRENT_HOST = torrent_host
if torrent_use_ftp == "on":
torrent_use_ftp = 1
else:
torrent_use_ftp = 0
sickbeard.USE_TORRENT_FTP = torrent_use_ftp
sickbeard.FTP_HOST = ftp_host
sickbeard.FTP_PORT = ftp_port
sickbeard.FTP_TIMEOUT = ftp_timeout
if ftp_passive == "on":
ftp_passive = 1
else:
ftp_passive = 0
sickbeard.FTP_PASSIVE = ftp_passive
sickbeard.FTP_LOGIN = ftp_login
sickbeard.FTP_PASSWORD = ftp_password
sickbeard.FTP_DIR = ftp_remotedir
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/search/")
class ConfigPostProcessing:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_postProcessing.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None,
xbmc_data=None, xbmc__frodo__data=None, mediabrowser_data=None, synology_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None,
use_banner=None, keep_processed_dir=None, process_method=None, process_automatically=None, process_automatically_torrent=None, rename_episodes=None,
move_associated_files=None, tv_download_dir=None, torrent_download_dir=None, naming_custom_abd=None, naming_abd_pattern=None):
results = []
if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
if not config.change_TORRENT_DOWNLOAD_DIR(torrent_download_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_download_dir) + ", dir not changed."]
if use_banner == "on":
use_banner = 1
else:
use_banner = 0
if process_automatically == "on":
process_automatically = 1
else:
process_automatically = 0
if process_automatically_torrent == "on":
process_automatically_torrent = 1
else:
process_automatically_torrent = 0
if rename_episodes == "on":
rename_episodes = 1
else:
rename_episodes = 0
if keep_processed_dir == "on":
keep_processed_dir = 1
else:
keep_processed_dir = 0
if move_associated_files == "on":
move_associated_files = 1
else:
move_associated_files = 0
if naming_custom_abd == "on":
naming_custom_abd = 1
else:
naming_custom_abd = 0
sickbeard.PROCESS_AUTOMATICALLY = process_automatically
sickbeard.PROCESS_AUTOMATICALLY_TORRENT = process_automatically_torrent
sickbeard.KEEP_PROCESSED_DIR = keep_processed_dir
sickbeard.PROCESS_METHOD = process_method
sickbeard.RENAME_EPISODES = rename_episodes
sickbeard.MOVE_ASSOCIATED_FILES = move_associated_files
sickbeard.NAMING_CUSTOM_ABD = naming_custom_abd
sickbeard.metadata_provider_dict['XBMC'].set_config(xbmc_data)
sickbeard.metadata_provider_dict['XBMC (Frodo)'].set_config(xbmc__frodo__data)
sickbeard.metadata_provider_dict['MediaBrowser'].set_config(mediabrowser_data)
sickbeard.metadata_provider_dict['Synology'].set_config(synology_data)
sickbeard.metadata_provider_dict['Sony PS3'].set_config(sony_ps3_data)
sickbeard.metadata_provider_dict['WDTV'].set_config(wdtv_data)
sickbeard.metadata_provider_dict['TIVO'].set_config(tivo_data)
if self.isNamingValid(naming_pattern, naming_multi_ep) != "invalid":
sickbeard.NAMING_PATTERN = naming_pattern
sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_abd_pattern, None, True) != "invalid":
sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
elif naming_custom_abd:
results.append("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")
sickbeard.USE_BANNER = use_banner
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/postProcessing/")
@cherrypy.expose
def testNaming(self, pattern=None, multi=None, abd=False):
if multi != None:
multi = int(multi)
result = naming.test_name(pattern, multi, abd)
result = ek.ek(os.path.join, result['dir'], result['name'])
return result
@cherrypy.expose
def isNamingValid(self, pattern=None, multi=None, abd=False):
if pattern == None:
return "invalid"
# air by date shows just need one check, we don't need to worry about season folders
if abd:
is_valid = naming.check_valid_abd_naming(pattern)
require_season_folders = False
else:
# check validity of single and multi ep cases for the whole path
is_valid = naming.check_valid_naming(pattern, multi)
# check validity of single and multi ep cases for only the file name
require_season_folders = naming.check_force_season_folders(pattern, multi)
if is_valid and not require_season_folders:
return "valid"
elif is_valid and require_season_folders:
return "seasonfolders"
else:
return "invalid"
class ConfigProviders:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_providers.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def canAddNewznabProvider(self, name):
if not name:
return json.dumps({'error': 'Invalid name specified'})
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
tempProvider = newznab.NewznabProvider(name, '')
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Exists as '+providerDict[tempProvider.getID()].name})
else:
return json.dumps({'success': tempProvider.getID()})
@cherrypy.expose
def saveNewznabProvider(self, name, url, key=''):
if not name or not url:
return '0'
if not url.endswith('/'):
url = url + '/'
providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
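        # update an existing (non-default) newznab provider in place, otherwise register it as a new one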
if name in providerDict:
if not providerDict[name].default:
providerDict[name].name = name
providerDict[name].url = url
providerDict[name].key = key
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = newznab.NewznabProvider(name, url, key)
sickbeard.newznabProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
@cherrypy.expose
def deleteNewznabProvider(self, id):
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if id not in providerDict or providerDict[id].default:
return '0'
# delete it from the list
sickbeard.newznabProviderList.remove(providerDict[id])
if id in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(id)
return '1'
@cherrypy.expose
def saveProviders(self, nzbmatrix_username=None, nzbmatrix_apikey=None,
nzbs_r_us_uid=None, nzbs_r_us_hash=None, newznab_string='',
omgwtfnzbs_uid=None, omgwtfnzbs_key=None,
tvtorrents_digest=None, tvtorrents_hash=None,
torrentleech_key=None,
btn_api_key=None,
newzbin_username=None, newzbin_password=None,t411_username=None,t411_password=None,ftdb_username=None,ftdb_password=None,addict_username=None,addict_password=None,fnt_username=None,fnt_password=None,libertalia_username=None,libertalia_password=None,xthor_username=None,xthor_password=None,thinkgeek_username=None,thinkgeek_password=None,
ethor_key=None,
provider_order=None):
results = []
provider_str_list = provider_order.split()
provider_list = []
newznabProviderDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
finishedNames = []
# add all the newznab info we got into our list
for curNewznabProviderStr in newznab_string.split('!!!'):
if not curNewznabProviderStr:
continue
curName, curURL, curKey = curNewznabProviderStr.split('|')
newProvider = newznab.NewznabProvider(curName, curURL, curKey)
curID = newProvider.getID()
# if it already exists then update it
if curID in newznabProviderDict:
newznabProviderDict[curID].name = curName
newznabProviderDict[curID].url = curURL
newznabProviderDict[curID].key = curKey
else:
sickbeard.newznabProviderList.append(newProvider)
finishedNames.append(curID)
# delete anything that is missing
for curProvider in sickbeard.newznabProviderList:
if curProvider.getID() not in finishedNames:
sickbeard.newznabProviderList.remove(curProvider)
# do the enable/disable
for curProviderStr in provider_str_list:
curProvider, curEnabled = curProviderStr.split(':')
curEnabled = int(curEnabled)
provider_list.append(curProvider)
if curProvider == 'nzbs_r_us':
sickbeard.NZBSRUS = curEnabled
elif curProvider == 'nzbs_org_old':
sickbeard.NZBS = curEnabled
elif curProvider == 'nzbmatrix':
sickbeard.NZBMATRIX = curEnabled
elif curProvider == 'newzbin':
sickbeard.NEWZBIN = curEnabled
elif curProvider == 'bin_req':
sickbeard.BINREQ = curEnabled
elif curProvider == 'womble_s_index':
sickbeard.WOMBLE = curEnabled
elif curProvider == 'nzbx':
sickbeard.NZBX = curEnabled
elif curProvider == 'omgwtfnzbs':
sickbeard.OMGWTFNZBS = curEnabled
elif curProvider == 'ezrss':
sickbeard.EZRSS = curEnabled
elif curProvider == 'tvtorrents':
sickbeard.TVTORRENTS = curEnabled
elif curProvider == 'torrentleech':
sickbeard.TORRENTLEECH = curEnabled
elif curProvider == 'btn':
sickbeard.BTN = curEnabled
elif curProvider == 'binnewz':
sickbeard.BINNEWZ = curEnabled
elif curProvider == 't411':
sickbeard.T411 = curEnabled
elif curProvider == 'ftdb':
sickbeard.FTDB = curEnabled
elif curProvider == 'addict':
sickbeard.ADDICT = curEnabled
elif curProvider == 'fnt':
sickbeard.FNT = curEnabled
elif curProvider == 'libertalia':
sickbeard.LIBERTALIA = curEnabled
elif curProvider == 'xthor':
sickbeard.XTHOR = curEnabled
elif curProvider == 'thinkgeek':
sickbeard.THINKGEEK = curEnabled
elif curProvider == 'cpasbien':
sickbeard.Cpasbien = curEnabled
elif curProvider == 'kat':
sickbeard.kat = curEnabled
elif curProvider == 'piratebay':
sickbeard.THEPIRATEBAY = curEnabled
elif curProvider == 'ethor':
sickbeard.ETHOR = curEnabled
elif curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
else:
logger.log(u"don't know what " + curProvider + " is, skipping")
sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()
sickbeard.TORRENTLEECH_KEY = torrentleech_key.strip()
sickbeard.ETHOR_KEY = ethor_key.strip()
sickbeard.BTN_API_KEY = btn_api_key.strip()
sickbeard.T411_USERNAME = t411_username
sickbeard.T411_PASSWORD = t411_password
sickbeard.FTDB_USERNAME = ftdb_username
sickbeard.FTDB_PASSWORD = ftdb_password
sickbeard.ADDICT_USERNAME = addict_username
sickbeard.ADDICT_PASSWORD = addict_password
sickbeard.FNT_USERNAME = fnt_username
sickbeard.FNT_PASSWORD = fnt_password
sickbeard.LIBERTALIA_USERNAME = libertalia_username
sickbeard.LIBERTALIA_PASSWORD = libertalia_password
sickbeard.XTHOR_USERNAME = xthor_username
sickbeard.XTHOR_PASSWORD = xthor_password
sickbeard.THINKGEEK_USERNAME = thinkgeek_username
sickbeard.THINKGEEK_PASSWORD = thinkgeek_password
sickbeard.NZBSRUS_UID = nzbs_r_us_uid.strip()
sickbeard.NZBSRUS_HASH = nzbs_r_us_hash.strip()
sickbeard.OMGWTFNZBS_UID = omgwtfnzbs_uid.strip()
sickbeard.OMGWTFNZBS_KEY = omgwtfnzbs_key.strip()
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/providers/")
class ConfigNotifications:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_notifications.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveNotifications(self, use_xbmc=None, xbmc_notify_onsnatch=None, xbmc_notify_ondownload=None, xbmc_update_onlyfirst=None, xbmc_notify_onsubtitledownload=None,
xbmc_update_library=None, xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None, boxcar2_notify_onsubtitledownload=None, boxcar2_access_token=None, boxcar2_sound=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None, pushover_prio=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None,trakt_remove_watchlist=None,trakt_use_watchlist=None,trakt_start_paused=None,trakt_method_add=None,
use_betaseries=None, betaseries_username=None, betaseries_password=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None, pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None, pushbullet_device_list=None, pushbullet_channel_list=None,
use_mail=None, mail_username=None, mail_password=None, mail_server=None, mail_ssl=None, mail_from=None, mail_to=None, mail_notify_onsnatch=None ):
results = []
if xbmc_notify_onsnatch == "on":
xbmc_notify_onsnatch = 1
else:
xbmc_notify_onsnatch = 0
if xbmc_notify_ondownload == "on":
xbmc_notify_ondownload = 1
else:
xbmc_notify_ondownload = 0
if xbmc_notify_onsubtitledownload == "on":
xbmc_notify_onsubtitledownload = 1
else:
xbmc_notify_onsubtitledownload = 0
if xbmc_update_library == "on":
xbmc_update_library = 1
else:
xbmc_update_library = 0
if xbmc_update_full == "on":
xbmc_update_full = 1
else:
xbmc_update_full = 0
if xbmc_update_onlyfirst == "on":
xbmc_update_onlyfirst = 1
else:
xbmc_update_onlyfirst = 0
if use_xbmc == "on":
use_xbmc = 1
else:
use_xbmc = 0
if plex_update_library == "on":
plex_update_library = 1
else:
plex_update_library = 0
if plex_notify_onsnatch == "on":
plex_notify_onsnatch = 1
else:
plex_notify_onsnatch = 0
if plex_notify_ondownload == "on":
plex_notify_ondownload = 1
else:
plex_notify_ondownload = 0
if plex_notify_onsubtitledownload == "on":
plex_notify_onsubtitledownload = 1
else:
plex_notify_onsubtitledownload = 0
if use_plex == "on":
use_plex = 1
else:
use_plex = 0
if growl_notify_onsnatch == "on":
growl_notify_onsnatch = 1
else:
growl_notify_onsnatch = 0
if growl_notify_ondownload == "on":
growl_notify_ondownload = 1
else:
growl_notify_ondownload = 0
if growl_notify_onsubtitledownload == "on":
growl_notify_onsubtitledownload = 1
else:
growl_notify_onsubtitledownload = 0
if use_growl == "on":
use_growl = 1
else:
use_growl = 0
if prowl_notify_onsnatch == "on":
prowl_notify_onsnatch = 1
else:
prowl_notify_onsnatch = 0
if prowl_notify_ondownload == "on":
prowl_notify_ondownload = 1
else:
prowl_notify_ondownload = 0
if prowl_notify_onsubtitledownload == "on":
prowl_notify_onsubtitledownload = 1
else:
prowl_notify_onsubtitledownload = 0
if use_prowl == "on":
use_prowl = 1
else:
use_prowl = 0
if twitter_notify_onsnatch == "on":
twitter_notify_onsnatch = 1
else:
twitter_notify_onsnatch = 0
if twitter_notify_ondownload == "on":
twitter_notify_ondownload = 1
else:
twitter_notify_ondownload = 0
if twitter_notify_onsubtitledownload == "on":
twitter_notify_onsubtitledownload = 1
else:
twitter_notify_onsubtitledownload = 0
if use_twitter == "on":
use_twitter = 1
else:
use_twitter = 0
if boxcar_notify_onsnatch == "on":
boxcar_notify_onsnatch = 1
else:
boxcar_notify_onsnatch = 0
if boxcar_notify_ondownload == "on":
boxcar_notify_ondownload = 1
else:
boxcar_notify_ondownload = 0
if boxcar_notify_onsubtitledownload == "on":
boxcar_notify_onsubtitledownload = 1
else:
boxcar_notify_onsubtitledownload = 0
if use_boxcar == "on":
use_boxcar = 1
else:
use_boxcar = 0
if pushover_notify_onsnatch == "on":
pushover_notify_onsnatch = 1
else:
pushover_notify_onsnatch = 0
if pushover_notify_ondownload == "on":
pushover_notify_ondownload = 1
else:
pushover_notify_ondownload = 0
if pushover_notify_onsubtitledownload == "on":
pushover_notify_onsubtitledownload = 1
else:
pushover_notify_onsubtitledownload = 0
if use_pushover == "on":
use_pushover = 1
else:
use_pushover = 0
if use_nmj == "on":
use_nmj = 1
else:
use_nmj = 0
if use_synoindex == "on":
use_synoindex = 1
else:
use_synoindex = 0
if use_synologynotifier == "on":
use_synologynotifier = 1
else:
use_synologynotifier = 0
if synologynotifier_notify_onsnatch == "on":
synologynotifier_notify_onsnatch = 1
else:
synologynotifier_notify_onsnatch = 0
if synologynotifier_notify_ondownload == "on":
synologynotifier_notify_ondownload = 1
else:
synologynotifier_notify_ondownload = 0
if synologynotifier_notify_onsubtitledownload == "on":
synologynotifier_notify_onsubtitledownload = 1
else:
synologynotifier_notify_onsubtitledownload = 0
if use_nmjv2 == "on":
use_nmjv2 = 1
else:
use_nmjv2 = 0
if use_trakt == "on":
use_trakt = 1
else:
use_trakt = 0
if trakt_remove_watchlist == "on":
trakt_remove_watchlist = 1
else:
trakt_remove_watchlist = 0
if trakt_use_watchlist == "on":
trakt_use_watchlist = 1
else:
trakt_use_watchlist = 0
if trakt_start_paused == "on":
trakt_start_paused = 1
else:
trakt_start_paused = 0
if use_betaseries == "on":
use_betaseries = 1
else:
use_betaseries = 0
if use_pytivo == "on":
use_pytivo = 1
else:
use_pytivo = 0
if pytivo_notify_onsnatch == "on":
pytivo_notify_onsnatch = 1
else:
pytivo_notify_onsnatch = 0
if pytivo_notify_ondownload == "on":
pytivo_notify_ondownload = 1
else:
pytivo_notify_ondownload = 0
if pytivo_notify_onsubtitledownload == "on":
pytivo_notify_onsubtitledownload = 1
else:
pytivo_notify_onsubtitledownload = 0
if pytivo_update_library == "on":
pytivo_update_library = 1
else:
pytivo_update_library = 0
if use_nma == "on":
use_nma = 1
else:
use_nma = 0
if nma_notify_onsnatch == "on":
nma_notify_onsnatch = 1
else:
nma_notify_onsnatch = 0
if nma_notify_ondownload == "on":
nma_notify_ondownload = 1
else:
nma_notify_ondownload = 0
if nma_notify_onsubtitledownload == "on":
nma_notify_onsubtitledownload = 1
else:
nma_notify_onsubtitledownload = 0
if use_mail == "on":
use_mail = 1
else:
use_mail = 0
if mail_ssl == "on":
mail_ssl = 1
else:
mail_ssl = 0
if mail_notify_onsnatch == "on":
mail_notify_onsnatch = 1
else:
mail_notify_onsnatch = 0
if use_pushalot == "on":
use_pushalot = 1
else:
use_pushalot = 0
if pushalot_notify_onsnatch == "on":
pushalot_notify_onsnatch = 1
else:
pushalot_notify_onsnatch = 0
if pushalot_notify_ondownload == "on":
pushalot_notify_ondownload = 1
else:
pushalot_notify_ondownload = 0
if pushalot_notify_onsubtitledownload == "on":
pushalot_notify_onsubtitledownload = 1
else:
pushalot_notify_onsubtitledownload = 0
if use_pushbullet == "on":
use_pushbullet = 1
else:
use_pushbullet = 0
if pushbullet_notify_onsnatch == "on":
pushbullet_notify_onsnatch = 1
else:
pushbullet_notify_onsnatch = 0
if pushbullet_notify_ondownload == "on":
pushbullet_notify_ondownload = 1
else:
pushbullet_notify_ondownload = 0
if pushbullet_notify_onsubtitledownload == "on":
pushbullet_notify_onsubtitledownload = 1
else:
pushbullet_notify_onsubtitledownload = 0
if use_boxcar2 == "on":
use_boxcar2 = 1
else:
use_boxcar2 = 0
if boxcar2_notify_onsnatch == "on":
boxcar2_notify_onsnatch = 1
else:
boxcar2_notify_onsnatch = 0
if boxcar2_notify_ondownload == "on":
boxcar2_notify_ondownload = 1
else:
boxcar2_notify_ondownload = 0
if boxcar2_notify_onsubtitledownload == "on":
boxcar2_notify_onsubtitledownload = 1
else:
boxcar2_notify_onsubtitledownload = 0
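# push the normalized notifier settings onto the live sickbeard config before saving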
sickbeard.USE_XBMC = use_xbmc
sickbeard.XBMC_NOTIFY_ONSNATCH = xbmc_notify_onsnatch
sickbeard.XBMC_NOTIFY_ONDOWNLOAD = xbmc_notify_ondownload
sickbeard.XBMC_NOTIFY_ONSUBTITLEDOWNLOAD = xbmc_notify_onsubtitledownload
sickbeard.XBMC_UPDATE_LIBRARY = xbmc_update_library
sickbeard.XBMC_UPDATE_FULL = xbmc_update_full
sickbeard.XBMC_UPDATE_ONLYFIRST = xbmc_update_onlyfirst
sickbeard.XBMC_HOST = xbmc_host
sickbeard.XBMC_USERNAME = xbmc_username
sickbeard.XBMC_PASSWORD = xbmc_password
sickbeard.USE_PLEX = use_plex
sickbeard.PLEX_NOTIFY_ONSNATCH = plex_notify_onsnatch
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = plex_notify_ondownload
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = plex_notify_onsubtitledownload
sickbeard.PLEX_UPDATE_LIBRARY = plex_update_library
sickbeard.PLEX_HOST = plex_host
sickbeard.PLEX_SERVER_HOST = plex_server_host
sickbeard.PLEX_USERNAME = plex_username
sickbeard.PLEX_PASSWORD = plex_password
sickbeard.USE_GROWL = use_growl
sickbeard.GROWL_NOTIFY_ONSNATCH = growl_notify_onsnatch
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = growl_notify_ondownload
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = growl_notify_onsubtitledownload
sickbeard.GROWL_HOST = growl_host
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_PROWL = use_prowl
sickbeard.PROWL_NOTIFY_ONSNATCH = prowl_notify_onsnatch
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = prowl_notify_ondownload
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = prowl_notify_onsubtitledownload
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.USE_TWITTER = use_twitter
sickbeard.TWITTER_NOTIFY_ONSNATCH = twitter_notify_onsnatch
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = twitter_notify_ondownload
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = twitter_notify_onsubtitledownload
sickbeard.USE_BOXCAR = use_boxcar
sickbeard.BOXCAR_NOTIFY_ONSNATCH = boxcar_notify_onsnatch
sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = boxcar_notify_ondownload
sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar_notify_onsubtitledownload
sickbeard.BOXCAR_USERNAME = boxcar_username
sickbeard.USE_BOXCAR2 = use_boxcar2
sickbeard.BOXCAR2_NOTIFY_ONSNATCH = boxcar2_notify_onsnatch
sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD = boxcar2_notify_ondownload
sickbeard.BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar2_notify_onsubtitledownload
sickbeard.BOXCAR2_ACCESS_TOKEN = boxcar2_access_token
sickbeard.BOXCAR2_SOUND = boxcar2_sound
sickbeard.USE_PUSHOVER = use_pushover
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = pushover_notify_onsnatch
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = pushover_notify_ondownload
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = pushover_notify_onsubtitledownload
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.PUSHOVER_PRIO = pushover_prio
sickbeard.USE_LIBNOTIFY = use_libnotify == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = libnotify_notify_onsnatch == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = libnotify_notify_ondownload == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = libnotify_notify_onsubtitledownload == "on"
sickbeard.USE_NMJ = use_nmj
sickbeard.NMJ_HOST = nmj_host
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_SYNOINDEX = use_synoindex
sickbeard.USE_SYNOLOGYNOTIFIER = use_synologynotifier
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = synologynotifier_notify_onsnatch
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = synologynotifier_notify_ondownload
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = synologynotifier_notify_onsubtitledownload
sickbeard.USE_NMJv2 = use_nmjv2
sickbeard.NMJv2_HOST = nmjv2_host
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_TRAKT = use_trakt
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_PASSWORD = trakt_password
sickbeard.TRAKT_API = trakt_api
sickbeard.TRAKT_REMOVE_WATCHLIST = trakt_remove_watchlist
sickbeard.TRAKT_USE_WATCHLIST = trakt_use_watchlist
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
sickbeard.TRAKT_START_PAUSED = trakt_start_paused
sickbeard.USE_BETASERIES = use_betaseries
sickbeard.BETASERIES_USERNAME = betaseries_username
sickbeard.BETASERIES_PASSWORD = betaseries_password
sickbeard.USE_PYTIVO = use_pytivo
sickbeard.PYTIVO_NOTIFY_ONSNATCH = pytivo_notify_onsnatch
sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = pytivo_notify_ondownload
sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = pytivo_notify_onsubtitledownload
sickbeard.PYTIVO_UPDATE_LIBRARY = pytivo_update_library
sickbeard.PYTIVO_HOST = pytivo_host
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = use_nma
sickbeard.NMA_NOTIFY_ONSNATCH = nma_notify_onsnatch
sickbeard.NMA_NOTIFY_ONDOWNLOAD = nma_notify_ondownload
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = nma_notify_onsubtitledownload
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_MAIL = use_mail
sickbeard.MAIL_USERNAME = mail_username
sickbeard.MAIL_PASSWORD = mail_password
sickbeard.MAIL_SERVER = mail_server
sickbeard.MAIL_SSL = mail_ssl
sickbeard.MAIL_FROM = mail_from
sickbeard.MAIL_TO = mail_to
sickbeard.MAIL_NOTIFY_ONSNATCH = mail_notify_onsnatch
sickbeard.USE_PUSHALOT = use_pushalot
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = pushalot_notify_onsnatch
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = pushalot_notify_ondownload
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = pushalot_notify_onsubtitledownload
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.USE_PUSHBULLET = use_pushbullet
sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = pushbullet_notify_onsnatch
sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = pushbullet_notify_ondownload
sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = pushbullet_notify_onsubtitledownload
sickbeard.PUSHBULLET_API = pushbullet_api
sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list
sickbeard.PUSHBULLET_CHANNEL = pushbullet_channel_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/notifications/")
class ConfigSubtitles:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_subtitles.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSubtitles(self, use_subtitles=None, subsnewasold=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None, subtitles_dir_sub=None, subsnolang = None, service_order=None, subtitles_history=None, subtitles_clean_hi=None, subtitles_clean_team=None, subtitles_clean_music=None, subtitles_clean_punc=None):
results = []
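# start or stop the subtitle finder thread to match the new use_subtitles setting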
if use_subtitles == "on":
use_subtitles = 1
if sickbeard.subtitlesFinderScheduler.thread == None or not sickbeard.subtitlesFinderScheduler.thread.isAlive():
sickbeard.subtitlesFinderScheduler.initThread()
else:
use_subtitles = 0
sickbeard.subtitlesFinderScheduler.abort = True
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
sickbeard.subtitlesFinderScheduler.thread.join(5)
except:
pass
if subtitles_history == "on":
subtitles_history = 1
else:
subtitles_history = 0
if subtitles_dir_sub == "on":
subtitles_dir_sub = 1
else:
subtitles_dir_sub = 0
if subsnewasold == "on":
subsnewasold = 1
else:
subsnewasold = 0
if subsnolang == "on":
subsnolang = 1
else:
subsnolang = 0
sickbeard.USE_SUBTITLES = use_subtitles
sickbeard.SUBSNEWASOLD = subsnewasold
sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage(subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
sickbeard.SUBTITLES_DIR = subtitles_dir
sickbeard.SUBTITLES_DIR_SUB = subtitles_dir_sub
sickbeard.SUBSNOLANG = subsnolang
sickbeard.SUBTITLES_HISTORY = subtitles_history
# Subtitles services
services_str_list = service_order.split()
subtitles_services_list = []
subtitles_services_enabled = []
for curServiceStr in services_str_list:
curService, curEnabled = curServiceStr.split(':')
subtitles_services_list.append(curService)
subtitles_services_enabled.append(int(curEnabled))
sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
#Subtitles Cleansing
if subtitles_clean_hi == "on":
subtitles_clean_hi = 1
else:
subtitles_clean_hi = 0
if subtitles_clean_team == "on":
subtitles_clean_team = 1
else:
subtitles_clean_team = 0
if subtitles_clean_music == "on":
subtitles_clean_music = 1
else:
subtitles_clean_music = 0
if subtitles_clean_punc == "on":
subtitles_clean_punc = 1
else:
subtitles_clean_punc = 0
sickbeard.SUBTITLES_CLEAN_HI = subtitles_clean_hi
sickbeard.SUBTITLES_CLEAN_TEAM = subtitles_clean_team
sickbeard.SUBTITLES_CLEAN_MUSIC = subtitles_clean_music
sickbeard.SUBTITLES_CLEAN_PUNC = subtitles_clean_punc
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/subtitles/")
class Config:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config.tmpl")
t.submenu = ConfigMenu
return _munge(t)
general = ConfigGeneral()
search = ConfigSearch()
postProcessing = ConfigPostProcessing()
providers = ConfigProviders()
notifications = ConfigNotifications()
subtitles = ConfigSubtitles()
def haveXBMC():
return sickbeard.USE_XBMC and sickbeard.XBMC_UPDATE_LIBRARY
def havePLEX():
return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY
def HomeMenu():
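# Home section menu; 'requires' points at a predicate (haveXBMC/havePLEX), presumably used to hide the entry when that notifier is disabled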
return [
{ 'title': 'Add Shows', 'path': 'home/addShows/', },
{ 'title': 'Manual Post-Processing', 'path': 'home/postprocess/' },
{ 'title': 'Update XBMC', 'path': 'home/updateXBMC/', 'requires': haveXBMC },
{ 'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': havePLEX },
{ 'title': 'Update', 'path': 'manage/manageSearches/forceVersionCheck', 'confirm': True},
{ 'title': 'Restart', 'path': 'home/restart/?pid='+str(sickbeard.PID), 'confirm': True },
{ 'title': 'Shutdown', 'path': 'home/shutdown/?pid='+str(sickbeard.PID), 'confirm': True },
]
class HomePostProcess:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_postprocess.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None):
if not dir:
redirect("/home/postprocess")
else:
result = processTV.processDir(dir, nzbName)
if quiet != None and int(quiet) == 1:
return result
result = result.replace("\n","<br />\n")
return _genericMessage("Postprocessing results", result)
class NewHomeAddShows:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_addShows.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def getTVDBLanguages(self):
result = tvdb_api.Tvdb().config['valid_languages']
# Make sure list is sorted alphabetically but 'fr' is in front
if 'fr' in result:
del result[result.index('fr')]
result.sort()
result.insert(0, 'fr')
return json.dumps({'results': result})
@cherrypy.expose
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
@cherrypy.expose
def searchTVDBForShowName(self, name, lang="fr"):
if not lang or lang == 'null':
lang = "fr"
baseURL = "http://thetvdb.com/api/GetSeries.php?"
nameUTF8 = name.encode('utf-8')
logger.log(u"Trying to find Show on thetvdb.com with: " + nameUTF8.decode('utf-8'), logger.DEBUG)
# Use each word in the show's name as a possible search term
keywords = nameUTF8.split(' ')
# Insert the whole show's name as the first search term so best results are first
# ex: keywords = ['Some Show Name', 'Some', 'Show', 'Name']
if len(keywords) > 1:
keywords.insert(0, nameUTF8)
# Query the TVDB for each search term and build the list of results
results = []
for searchTerm in keywords:
params = {'seriesname': searchTerm,
'language': lang}
finalURL = baseURL + urllib.urlencode(params)
logger.log(u"Searching for Show with searchterm: \'" + searchTerm.decode('utf-8') + u"\' on URL " + finalURL, logger.DEBUG)
urlData = helpers.getURL(finalURL)
if urlData is None:
# When urlData is None, trouble connecting to TVDB, don't try the rest of the keywords
logger.log(u"Unable to get URL: " + finalURL, logger.ERROR)
break
else:
try:
seriesXML = etree.ElementTree(etree.XML(urlData))
series = seriesXML.getiterator('Series')
except Exception, e:
# use finalURL in log, because urlData can be too much information
logger.log(u"Unable to parse XML for some reason: " + ex(e) + " from XML: " + finalURL, logger.ERROR)
series = ''
# add each result to our list
for curSeries in series:
tvdb_id = int(curSeries.findtext('seriesid'))
# don't add duplicates
if tvdb_id in [x[0] for x in results]:
continue
results.append((tvdb_id, curSeries.findtext('SeriesName'), curSeries.findtext('FirstAired')))
lang_id = tvdb_api.Tvdb().config['langabbv_to_id'][lang]
return json.dumps({'results': results, 'langid': lang_id})
@cherrypy.expose
def massAddTable(self, rootDir=None):
t = PageTemplate(file="home_massAddTable.tmpl")
t.submenu = HomeMenu()
myDB = db.DBConnection()
if not rootDir:
return "No folders selected."
elif type(rootDir) != list:
root_dirs = [rootDir]
else:
root_dirs = rootDir
root_dirs = [urllib.unquote_plus(x) for x in root_dirs]
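# move the configured default root dir (index taken from ROOT_DIRS) to the front of the list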
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
if len(root_dirs) > default_index:
tmp = root_dirs[default_index]
if tmp in root_dirs:
root_dirs.remove(tmp)
root_dirs = [tmp]+root_dirs
dir_list = []
for root_dir in root_dirs:
try:
file_list = ek.ek(os.listdir, root_dir)
except:
continue
for cur_file in file_list:
cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
if not ek.ek(os.path.isdir, cur_path):
continue
cur_dir = {
'dir': cur_path,
'display_dir': '<b>'+ek.ek(os.path.dirname, cur_path)+os.sep+'</b>'+ek.ek(os.path.basename, cur_path),
}
# see if the folder is in XBMC already
dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])
if dirResults:
cur_dir['added_already'] = True
else:
cur_dir['added_already'] = False
dir_list.append(cur_dir)
tvdb_id = ''
show_name = ''
for cur_provider in sickbeard.metadata_provider_dict.values():
(tvdb_id, show_name) = cur_provider.retrieveShowMetadata(cur_path)
if tvdb_id and show_name:
break
cur_dir['existing_info'] = (tvdb_id, show_name)
if tvdb_id and helpers.findCertainShow(sickbeard.showList, tvdb_id):
cur_dir['added_already'] = True
t.dirList = dir_list
return _munge(t)
@cherrypy.expose
def newShow(self, show_to_add=None, other_shows=None):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(file="home_newShow.tmpl")
t.submenu = HomeMenu()
show_dir, tvdb_id, show_name = self.split_extra_show(show_to_add)
if tvdb_id and show_name:
use_provided_info = True
else:
use_provided_info = False
# tell the template whether we're giving it show name & TVDB ID
t.use_provided_info = use_provided_info
# use the given show_dir for the tvdb search if available
if not show_dir:
t.default_show_name = ''
elif not show_name:
t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.',' ')
else:
t.default_show_name = show_name
# carry a list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
if use_provided_info:
t.provided_tvdb_id = tvdb_id
t.provided_tvdb_name = show_name
t.provided_show_dir = show_dir
t.other_shows = other_shows
return _munge(t)
@cherrypy.expose
def addNewShow(self, whichSeries=None, tvdbLang="fr", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None, fullShowPath=None,
other_shows=None, skipShow=None, audio_lang=None):
"""
Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
provided then it forwards back to newShow, if not it goes to /home.
"""
# grab our list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
def finishAddShow():
# if there are no extra shows then go home
if not other_shows:
redirect('/home')
# peel off the next one
next_show_dir = other_shows[0]
rest_of_show_dirs = other_shows[1:]
# go to add the next show
return self.newShow(next_show_dir, rest_of_show_dirs)
# if we're skipping then behave accordingly
if skipShow:
return finishAddShow()
# sanity check on our inputs
if (not rootDir and not fullShowPath) or not whichSeries:
return "Missing params, no tvdb id or folder:"+repr(whichSeries)+" and "+repr(rootDir)+"/"+repr(fullShowPath)
# figure out what show we're adding and where
series_pieces = whichSeries.partition('|')
if len(series_pieces) < 3:
return "Error with show selection."
tvdb_id = int(series_pieces[0])
show_name = series_pieces[2]
# use the whole path if it's given, or else append the show name to the root dir to get the full show path
if fullShowPath:
show_dir = ek.ek(os.path.normpath, fullShowPath)
else:
show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name))
# blanket policy - if the dir exists you should have used "add existing show" numbnuts
if ek.ek(os.path.isdir, show_dir) and not fullShowPath:
ui.notifications.error("Unable to add show", "Folder "+show_dir+" exists already")
redirect('/home/addShows/existingShows')
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of "+show_dir+" due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder "+show_dir+", can't add the show", logger.ERROR)
ui.notifications.error("Unable to add show", "Unable to create the folder "+show_dir+", can't add the show")
redirect("/home")
else:
helpers.chmodAsParent(show_dir)
# prepare the inputs for passing along
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if not anyQualities:
anyQualities = []
if not bestQualities:
bestQualities = []
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
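# fold the two selected quality lists into a single combined quality value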
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(defaultStatus), newQuality, flatten_folders, tvdbLang, subtitles, audio_lang) #@UndefinedVariable
ui.notifications.message('Show added', 'Adding the specified show into '+show_dir)
return finishAddShow()
@cherrypy.expose
def existingShows(self):
"""
Prints out the page to add existing shows from a root dir
"""
t = PageTemplate(file="home_addExistingShow.tmpl")
t.submenu = HomeMenu()
return _munge(t)
def split_extra_show(self, extra_show):
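# extra_show is expected to look like "show_dir|tvdb_id|show name"; the show name may itself contain "|"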
if not extra_show:
return (None, None, None)
split_vals = extra_show.split('|')
if len(split_vals) < 3:
return (extra_show, None, None)
show_dir = split_vals[0]
tvdb_id = split_vals[1]
show_name = '|'.join(split_vals[2:])
return (show_dir, tvdb_id, show_name)
@cherrypy.expose
def addExistingShows(self, shows_to_add=None, promptForSettings=None):
"""
Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards
along to the newShow page.
"""
# grab a list of other shows to add, if provided
if not shows_to_add:
shows_to_add = []
elif type(shows_to_add) != list:
shows_to_add = [shows_to_add]
shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add]
if promptForSettings == "on":
promptForSettings = 1
else:
promptForSettings = 0
tvdb_id_given = []
dirs_only = []
# separate all the ones with TVDB IDs
for cur_dir in shows_to_add:
if not '|' in cur_dir:
dirs_only.append(cur_dir)
else:
show_dir, tvdb_id, show_name = self.split_extra_show(cur_dir)
if not show_dir or not tvdb_id or not show_name:
continue
tvdb_id_given.append((show_dir, int(tvdb_id), show_name))
# if they want me to prompt for settings then I will just carry on to the newShow page
if promptForSettings and shows_to_add:
return self.newShow(shows_to_add[0], shows_to_add[1:])
# if they don't want me to prompt for settings then I can just add all the nfo shows now
num_added = 0
for cur_show in tvdb_id_given:
show_dir, tvdb_id, show_name = cur_show
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(sickbeard.STATUS_DEFAULT), sickbeard.QUALITY_DEFAULT, sickbeard.FLATTEN_FOLDERS_DEFAULT,"fr", sickbeard.SUBTITLES_DEFAULT, sickbeard.AUDIO_SHOW_DEFAULT) #@UndefinedVariable
num_added += 1
if num_added:
ui.notifications.message("Shows Added", "Automatically added "+str(num_added)+" from their existing metadata files")
# if we're done then go home
if not dirs_only:
redirect('/home')
# for the remaining shows we need to prompt for each one, so forward this on to the newShow page
return self.newShow(dirs_only[0], dirs_only[1:])
ErrorLogsMenu = [
{ 'title': 'Clear Errors', 'path': 'errorlogs/clearerrors' },
#{ 'title': 'View Log', 'path': 'errorlogs/viewlog' },
]
class ErrorLogs:
@cherrypy.expose
def index(self):
t = PageTemplate(file="errorlogs.tmpl")
t.submenu = ErrorLogsMenu
return _munge(t)
@cherrypy.expose
def clearerrors(self):
classes.ErrorViewer.clear()
redirect("/errorlogs")
@cherrypy.expose
def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
t = PageTemplate(file="viewlogs.tmpl")
t.submenu = ErrorLogsMenu
minLevel = int(minLevel)
data = []
if os.path.isfile(logger.sb_log_instance.log_file):
f = open(logger.sb_log_instance.log_file)
data = f.readlines()
f.close()
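# expected log line format: 'Mon-DD HH:MM:SS LEVEL message'; group 6 captures the log level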
regex = "^(\w+).?\-(\d\d)\s+(\d\d)\:(\d\d):(\d\d)\s+([A-Z]+)\s+(.*)$"
finalData = []
numLines = 0
lastLine = False
numToShow = min(maxLines, len(data))
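# walk the log from newest to oldest, keeping lines at or above minLevel (plus their continuation lines), up to numToShow lines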
for x in reversed(data):
x = x.decode('utf-8')
match = re.match(regex, x)
if match:
level = match.group(6)
if level not in logger.reverseNames:
lastLine = False
continue
if logger.reverseNames[level] >= minLevel:
lastLine = True
finalData.append(x)
else:
lastLine = False
continue
elif lastLine:
finalData.append("AA"+x)
numLines += 1
if numLines >= numToShow:
break
result = "".join(finalData)
t.logLines = result
t.minLevel = minLevel
return _munge(t)
class Home:
@cherrypy.expose
def is_alive(self, *args, **kwargs):
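# JSONP endpoint: returns the running PID (or 'nope' when not started) wrapped in the supplied callback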
if 'callback' in kwargs and '_' in kwargs:
callback, _ = kwargs['callback'], kwargs['_']
else:
return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query stiring."
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
cherrypy.response.headers['Content-Type'] = 'text/javascript'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
if sickbeard.started:
return callback+'('+json.dumps({"msg": str(sickbeard.PID)})+');'
else:
return callback+'('+json.dumps({"msg": "nope"})+');'
@cherrypy.expose
def index(self):
t = PageTemplate(file="home.tmpl")
t.submenu = HomeMenu()
return _munge(t)
addShows = NewHomeAddShows()
postprocess = HomePostProcess()
@cherrypy.expose
def testSABnzbd(self, host=None, username=None, password=None, apikey=None):
if not host.endswith("/"):
host = host + "/"
connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey)
if connection:
authed, authMsg = sab.testAuthentication(host, username, password, apikey) #@UnusedVariable
if authed:
return "Success. Connected and authenticated"
else:
return "Authentication failed. SABnzbd expects '"+accesMsg+"' as authentication method"
else:
return "Unable to connect to host"
@cherrypy.expose
def testTorrent(self, torrent_method=None, host=None, username=None, password=None):
if not host.endswith("/"):
host = host + "/"
client = clients.getClientIstance(torrent_method)
connection, accesMsg = client(host, username, password).testAuthentication()
return accesMsg
@cherrypy.expose
def testGrowl(self, host=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.growl_notifier.test_notify(host, password)
if password == None or password == '':
pw_append = ''
else:
pw_append = " with password: " + password
if result:
return "Registered and Tested growl successfully "+urllib.unquote_plus(host)+pw_append
else:
return "Registration and Testing of growl failed "+urllib.unquote_plus(host)+pw_append
@cherrypy.expose
def testProwl(self, prowl_api=None, prowl_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority)
if result:
return "Test prowl notice sent successfully"
else:
return "Test prowl notice failed"
@cherrypy.expose
def testBoxcar(self, username=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar_notifier.test_notify(username)
if result:
return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked"
else:
return "Error sending Boxcar notification"
@cherrypy.expose
def testBoxcar2(self, accessToken=None, sound=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar2_notifier.test_notify(accessToken, sound)
if result:
return "Boxcar2 notification succeeded. Check your Boxcar2 clients to make sure it worked"
else:
return "Error sending Boxcar2 notification"
@cherrypy.expose
def testPushover(self, userKey=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushover_notifier.test_notify(userKey)
if result:
return "Pushover notification succeeded. Check your Pushover clients to make sure it worked"
else:
return "Error sending Pushover notification"
@cherrypy.expose
def twitterStep1(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
return notifiers.twitter_notifier._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier._get_credentials(key)
logger.log(u"result: "+str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
@cherrypy.expose
def testXBMC(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
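# test each comma-separated XBMC host individually and collect the per-host results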
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.xbmc_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test XBMC notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test XBMC notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testPLEX(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testLibnotify(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
if notifiers.libnotify_notifier.test_notify():
return "Tried sending desktop notification via libnotify"
else:
return notifiers.libnotify.diagnose()
@cherrypy.expose
def testNMJ(self, host=None, database=None, mount=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount)
if result:
return "Successfull started the scan update"
else:
return "Test failed to start the scan update"
@cherrypy.expose
def settingsNMJ(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host))
if result:
return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT}
else:
return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. (see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}'
@cherrypy.expose
def testNMJv2(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host))
if result:
return "Test notice sent successfully to " + urllib.unquote_plus(host)
else:
return "Test notice failed to " + urllib.unquote_plus(host)
@cherrypy.expose
def settingsNMJv2(self, host=None, dbloc=None, instance=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance)
if result:
return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE}
else:
return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {"dbloc": dbloc}
@cherrypy.expose
def testTrakt(self, api=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.trakt_notifier.test_notify(api, username, password)
if result:
return "Test notice sent successfully to Trakt"
else:
return "Test notice failed to Trakt"
@cherrypy.expose
def testBetaSeries(self, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.betaseries_notifier.test_notify(username, password)
if result:
return "Test notice sent successfully to BetaSeries"
else:
return "Test notice failed to BetaSeries"
@cherrypy.expose
def testMail(self, mail_from=None, mail_to=None, mail_server=None, mail_ssl=None, mail_user=None, mail_password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.mail_notifier.test_notify(mail_from, mail_to, mail_server, mail_ssl, mail_user, mail_password)
if result:
return "Mail sent"
else:
return "Can't sent mail."
@cherrypy.expose
def testNMA(self, nma_api=None, nma_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nma_notifier.test_notify(nma_api, nma_priority)
if result:
return "Test NMA notice sent successfully"
else:
return "Test NMA notice failed"
@cherrypy.expose
def testPushalot(self, authorizationToken=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushalot_notifier.test_notify(authorizationToken)
if result:
return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked"
else:
return "Error sending Pushalot notification"
@cherrypy.expose
def testPushbullet(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.test_notify(api)
if result:
return "Pushbullet notification succeeded. Check your device to make sure it worked"
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def getPushbulletDevices(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.get_devices(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
# get channels
@cherrypy.expose
def getPushbulletChannels(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.get_channels(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def shutdown(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
threading.Timer(2, sickbeard.invoke_shutdown).start()
title = "Shutting down"
message = "Sick Beard is shutting down..."
return _genericMessage(title, message)
@cherrypy.expose
def restart(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
t = PageTemplate(file="restart.tmpl")
t.submenu = HomeMenu()
# do a soft restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
return _munge(t)
@cherrypy.expose
def update(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
updated = sickbeard.versionCheckScheduler.action.update() #@UndefinedVariable
if updated:
# do a hard restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
t = PageTemplate(file="restart_bare.tmpl")
return _munge(t)
else:
return _genericMessage("Update Failed","Update wasn't successful, not restarting. Check your log for more information.")
@cherrypy.expose
def displayShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
myDB = db.DBConnection()
seasonResults = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season desc",
[showObj.tvdbid]
)
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[showObj.tvdbid]
)
t = PageTemplate(file="displayShow.tmpl")
t.submenu = [ { 'title': 'Edit', 'path': 'home/editShow?show=%d'%showObj.tvdbid } ]
try:
t.showLoc = (showObj.location, True)
except sickbeard.exceptions.ShowDirNotFoundException:
t.showLoc = (showObj._location, False)
show_message = ''
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
show_message = 'This show is in the process of being downloaded from theTVDB.com - the info below is incomplete.'
elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
show_message = 'The information below is in the process of being updated.'
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): #@UndefinedVariable
show_message = 'The episodes below are currently being refreshed from disk'
elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj): #@UndefinedVariable
show_message = 'Currently downloading subtitles for this show'
elif sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj): #@UndefinedVariable
show_message = 'Currently cleaning subtitles for this show'
elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued to be refreshed.'
elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting an update.'
elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting subtitles download.'
if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
t.submenu.append({ 'title': 'Delete', 'path': 'home/deleteShow?show=%d'%showObj.tvdbid, 'confirm': True })
t.submenu.append({ 'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&force=1'%showObj.tvdbid })
t.submenu.append({ 'title': 'Update show in XBMC', 'path': 'home/updateXBMC?showName=%s'%urllib.quote_plus(showObj.name.encode('utf-8')), 'requires': haveXBMC })
t.submenu.append({ 'title': 'Preview Rename', 'path': 'home/testRename?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'French Search', 'path': 'home/frenchSearch?show=%d'%showObj.tvdbid })
if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj) and not sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj) and showObj.subtitles:
t.submenu.append({ 'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Clean Subtitles', 'path': 'home/subtitleShowClean?show=%d'%showObj.tvdbid })
t.show = showObj
t.sqlResults = sqlResults
t.seasonResults = seasonResults
t.show_message = show_message
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
showSceneNumberColum = False
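# tally episodes per overview category and note whether any scene numbering data exists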
for curResult in sqlResults:
if not showSceneNumberColum and (isinstance(curResult["scene_season"], int) and isinstance(curResult["scene_episode"], int)):
showSceneNumberColum = True
curEpCat = showObj.getOverview(int(curResult["status"]))
epCats[str(curResult["season"])+"x"+str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
t.showSceneNumberColum = showSceneNumberColum
def titler(x):
if not x:
return x
if x.lower().startswith('a '):
x = x[2:]
elif x.lower().startswith('the '):
x = x[4:]
return x
t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
t.epCounts = epCounts
t.epCats = epCats
return _munge(t)
@cherrypy.expose
def plotDetails(self, show, season, episode):
result = db.DBConnection().action("SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (show, season, episode)).fetchone()
return result['description'] if result else 'Episode not found.'
@cherrypy.expose
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[], flatten_folders=None, paused=None, frenchsearch=None, directCall=False, air_by_date=None, tvdbLang=None, audio_lang=None, subtitles=None):
if show == None:
errString = "Invalid show ID: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errString = "Unable to find the specified show: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
if not location and not anyQualities and not bestQualities and not flatten_folders:
t = PageTemplate(file="editShow.tmpl")
t.submenu = HomeMenu()
with showObj.lock:
t.show = showObj
return _munge(t)
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
logger.log(u"flatten folders: "+str(flatten_folders))
if paused == "on":
paused = 1
else:
paused = 0
if frenchsearch == "on":
frenchsearch = 1
else:
frenchsearch = 0
if air_by_date == "on":
air_by_date = 1
else:
air_by_date = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if tvdbLang and tvdbLang in tvdb_api.Tvdb().config['valid_languages']:
tvdb_lang = tvdbLang
else:
tvdb_lang = showObj.lang
# if we changed the language then kick off an update
if tvdb_lang == showObj.lang:
do_update = False
else:
do_update = True
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
if type(exceptions_list) != list:
exceptions_list = [exceptions_list]
#If directCall from mass_edit_update no scene exceptions handling
if directCall:
do_update_exceptions = False
else:
if set(exceptions_list) == set(showObj.exceptions):
do_update_exceptions = False
else:
do_update_exceptions = True
errors = []
with showObj.lock:
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
showObj.quality = newQuality
# reversed for now
if bool(showObj.flatten_folders) != bool(flatten_folders):
showObj.flatten_folders = flatten_folders
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show: "+ex(e))
showObj.paused = paused
showObj.air_by_date = air_by_date
showObj.subtitles = subtitles
showObj.frenchsearch = frenchsearch
showObj.lang = tvdb_lang
showObj.audio_lang = audio_lang
# if we change location clear the db of episodes, change it, write to db, and rescan
if os.path.normpath(showObj._location) != os.path.normpath(location):
logger.log(os.path.normpath(showObj._location)+" != "+os.path.normpath(location), logger.DEBUG)
if not ek.ek(os.path.isdir, location):
errors.append("New location <tt>%s</tt> does not exist" % location)
# don't bother if we're going to update anyway
elif not do_update:
# change it
try:
showObj.location = location
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show:"+ex(e))
# grab updated info from TVDB
#showObj.loadEpisodesFromTVDB()
# rescan the episodes in the new folder
except exceptions.NoNFOException:
errors.append("The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in Sick Beard." % location)
# save it to the DB
showObj.saveToDB()
# force the update
if do_update:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on the show.")
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(showObj.tvdbid, exceptions_list) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on scene exceptions of the show.")
if directCall:
return errors
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
'<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>")
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def deleteShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
return _genericMessage("Error", "Shows can't be deleted while they're being added or updated.")
showObj.deleteShow()
ui.notifications.message('<b>%s</b> has been deleted' % showObj.name)
redirect("/home")
@cherrypy.expose
def refreshShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update from the DB
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
ui.notifications.error("Unable to refresh this show.",
ex(e))
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) #@UndefinedVariable
except exceptions.CantUpdateException, e:
ui.notifications.error("Unable to update this show.",
ex(e))
# just give it some time
time.sleep(3)
redirect("/home/displayShow?show=" + str(showObj.tvdbid))
@cherrypy.expose
def subtitleShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def subtitleShowClean(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.cleanSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def frenchSearch(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.searchFrench(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateXBMC(self, showName=None):
if sickbeard.XBMC_UPDATE_ONLYFIRST:
# only send update to first host in the list -- workaround for xbmc sql backend users
host = sickbeard.XBMC_HOST.split(",")[0].strip()
else:
host = sickbeard.XBMC_HOST
if notifiers.xbmc_notifier.update_library(showName=showName):
ui.notifications.message("Library update command sent to XBMC host(s): " + host)
else:
ui.notifications.error("Unable to contact one or more XBMC host(s): " + host)
redirect('/home')
@cherrypy.expose
def updatePLEX(self):
if notifiers.plex_notifier.update_library():
ui.notifications.message("Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
else:
ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
redirect('/home')
@cherrypy.expose
def setStatus(self, show=None, eps=None, status=None, direct=False):
if show == None or eps == None or status == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
if not statusStrings.has_key(int(status)):
errMsg = "Invalid status"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errMsg = "Error", "Show not in show list"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
segment_list = []
if eps != None:
for curEp in eps.split('|'):
logger.log(u"Attempting to set status on episode "+curEp+" to "+status, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
if epObj == None:
return _genericMessage("Error", "Episode couldn't be retrieved")
if int(status) == WANTED:
# figure out what segment the episode is in and remember it so we can backlog it
if epObj.show.air_by_date:
ep_segment = str(epObj.airdate)[:7]
else:
ep_segment = epObj.season
if ep_segment not in segment_list:
segment_list.append(ep_segment)
with epObj.lock:
# don't let them mess up UNAIRED episodes
if epObj.status == UNAIRED:
logger.log(u"Refusing to change status of "+curEp+" because it is UNAIRED", logger.ERROR)
continue
if int(status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH + Quality.DOWNLOADED + [IGNORED] and not ek.ek(os.path.isfile, epObj.location):
logger.log(u"Refusing to change status of "+curEp+" to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR)
continue
epObj.status = int(status)
epObj.saveToDB()
msg = "Backlog was automatically started for the following seasons of <b>"+showObj.name+"</b>:<br />"
for cur_segment in segment_list:
msg += "<li>Season "+str(cur_segment)+"</li>"
logger.log(u"Sending backlog for "+showObj.name+" season "+str(cur_segment)+" because some eps were set to wanted")
cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) #@UndefinedVariable
msg += "</ul>"
if segment_list:
ui.notifications.message("Backlog started", msg)
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setAudio(self, show=None, eps=None, audio_langs=None, direct=False):
if show == None or eps == None or audio_langs == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
for curEp in eps.split('|'):
logger.log(u"Attempting to set audio on episode "+curEp+" to "+audio_langs, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
epObj.audio_langs = str(audio_langs)
epObj.saveToDB()
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def testRename(self, show=None):
if show == None:
return _genericMessage("Error", "You must specify a show")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
ep_obj_list = showObj.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of the multi-episodes in the rename list already?
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
if ep_obj_rename_list:
# present season DESC episode DESC on screen
ep_obj_rename_list.reverse()
t = PageTemplate(file="testRename.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid}]
t.ep_obj_list = ep_obj_rename_list
t.show = showObj
return _munge(t)
@cherrypy.expose
def doRename(self, show=None, eps=None):
if show == None or eps == None:
errMsg = "You must specify a show and at least one episode"
return _genericMessage("Error", errMsg)
show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if show_obj == None:
errMsg = "Error", "Show not in show list"
return _genericMessage("Error", errMsg)
try:
show_loc = show_obj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
myDB = db.DBConnection()
if eps == None:
redirect("/home/displayShow?show=" + show)
for curEp in eps.split('|'):
epInfo = curEp.split('x')
# this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database
ep_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5", [show, epInfo[0], epInfo[1]])
if not ep_result:
logger.log(u"Unable to find an episode for "+curEp+", skipping", logger.WARNING)
continue
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]])
root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1]))
for cur_related_ep in related_eps_result:
related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep_obj not in root_ep_obj.relatedEps:
root_ep_obj.relatedEps.append(related_ep_obj)
root_ep_obj.rename()
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def trunchistory(self, epid):
myDB = db.DBConnection()
nbep = myDB.select("Select count(*) from episode_links where episode_id=?",[epid])
myDB.action("DELETE from episode_links where episode_id=?",[epid])
messnum = str(nbep[0][0]) + ' history links deleted'
ui.notifications.message('Episode History Truncated' , messnum)
return json.dumps({'result': 'ok'})
@cherrypy.expose
def searchEpisode(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj)
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) #@UndefinedVariable
# wait until the queue item tells us whether it worked or not
while ep_queue_item.success == None: #@UndefinedVariable
time.sleep(1)
# return the correct json value
if ep_queue_item.success:
return json.dumps({'result': statusStrings[ep_obj.status]})
return json.dumps({'result': 'failure'})
@cherrypy.expose
def searchEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try to download subtitles for that episode
previous_subtitles = ep_obj.subtitles
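# download subtitles, then move them into the configured subtitles folder (SUBTITLES_DIR or a per-show 'Subs' dir) and fix permissions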
try:
subtitles = ep_obj.downloadSubtitles()
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
if sickbeard.SUBTITLES_DIR_SUB:
for video in subtitles:
subs_new_path = os.path.join(os.path.dirname(video.path),"Subs")
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
if sickbeard.SUBSNOLANG:
helpers.copyFile(subtitle.path,subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path)
except:
return json.dumps({'result': 'failure'})
# return the correct json value
if previous_subtitles != ep_obj.subtitles:
status = 'New subtitles downloaded: %s' % ' '.join(["<img src='"+sickbeard.WEB_ROOT+"/images/flags/"+subliminal.language.Language(x).alpha2+".png' alt='"+subliminal.language.Language(x).name+"'/>" for x in sorted(list(set(ep_obj.subtitles).difference(previous_subtitles)))])
else:
status = 'No subtitles downloaded'
ui.notifications.message('Subtitles Search', status)
return json.dumps({'result': status, 'subtitles': ','.join([x for x in ep_obj.subtitles])})
@cherrypy.expose
def mergeEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try do merge subtitles for that episode
try:
ep_obj.mergeSubtitles()
except Exception as e:
return json.dumps({'result': 'failure', 'exception': str(e)})
# return the correct json value
status = 'Subtitles merged successfully '
ui.notifications.message('Merge Subtitles', status)
return json.dumps({'result': 'ok'})
class UI:
@cherrypy.expose
def add_message(self):
ui.notifications.message('Test 1', 'This is test number 1')
ui.notifications.error('Test 2', 'This is test number 2')
return "ok"
@cherrypy.expose
def get_messages(self):
messages = {}
cur_notification_num = 1
for cur_notification in ui.notifications.get_notifications():
messages['notification-'+str(cur_notification_num)] = {'title': cur_notification.title,
'message': cur_notification.message,
'type': cur_notification.type}
cur_notification_num += 1
return json.dumps(messages)
class WebInterface:
@cherrypy.expose
def index(self):
redirect("/home")
@cherrypy.expose
def showPoster(self, show=None, which=None):
#Redirect initial poster/banner thumb to default images
if which[0:6] == 'poster':
default_image_name = 'poster.png'
else:
default_image_name = 'banner.png'
default_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', default_image_name)
if show is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
cache_obj = image_cache.ImageCache()
if which == 'poster':
image_file_name = cache_obj.poster_path(showObj.tvdbid)
if which == 'poster_thumb':
image_file_name = cache_obj.poster_thumb_path(showObj.tvdbid)
if which == 'banner':
image_file_name = cache_obj.banner_path(showObj.tvdbid)
if which == 'banner_thumb':
image_file_name = cache_obj.banner_thumb_path(showObj.tvdbid)
if ek.ek(os.path.isfile, image_file_name):
return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg")
else:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
@cherrypy.expose
def setHomeLayout(self, layout):
if layout not in ('poster', 'banner', 'simple'):
layout = 'poster'
sickbeard.HOME_LAYOUT = layout
redirect("/home")
@cherrypy.expose
def setHomeSearch(self, search):
if search not in ('True', 'False'):
search = 'False'
        sickbeard.TOGGLE_SEARCH = search
redirect("/home")
@cherrypy.expose
def toggleDisplayShowSpecials(self, show):
sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setComingEpsLayout(self, layout):
if layout not in ('poster', 'banner', 'list'):
layout = 'banner'
sickbeard.COMING_EPS_LAYOUT = layout
redirect("/comingEpisodes")
@cherrypy.expose
def toggleComingEpsDisplayPaused(self):
sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED
redirect("/comingEpisodes")
@cherrypy.expose
def setComingEpsSort(self, sort):
if sort not in ('date', 'network', 'show'):
sort = 'date'
sickbeard.COMING_EPS_SORT = sort
redirect("/comingEpisodes")
@cherrypy.expose
def comingEpisodes(self, layout="None"):
# get local timezone and load network timezones
sb_timezone = tz.tzlocal()
network_dict = network_timezones.load_network_dict()
myDB = db.DBConnection()
today1 = datetime.date.today()
today = today1.toordinal()
next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
next_week = next_week1.toordinal()
recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
done_show_list = []
qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
sql_results1 = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, next_week] + qualList)
for cur_result in sql_results1:
done_show_list.append(helpers.tryInt(cur_result["showid"]))
more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN ("+','.join(['?']*len(done_show_list))+") AND tv_shows.tvdb_id = outer_eps.showid AND airdate IN (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? AND inner_eps.status NOT IN ("+','.join(['?']*len(Quality.DOWNLOADED+Quality.SNATCHED))+") ORDER BY inner_eps.airdate ASC LIMIT 1)", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
sql_results1 += more_sql_results
more_sql_results = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, recently, WANTED] + qualList)
sql_results1 += more_sql_results
# sort by localtime
sorts = {
'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
}
# make a dict out of the sql results
sql_results = [dict(row) for row in sql_results1]
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(\d{1,2}):(\d{2,2})( [PA]M)?\b", flags=re.IGNORECASE)
# add localtime to the dict
for index, item in enumerate(sql_results1):
mo = time_regex.search(item['airs'])
            if mo is not None and len(mo.groups()) >= 2:
try:
hr = helpers.tryInt(mo.group(1))
m = helpers.tryInt(mo.group(2))
ap = mo.group(3)
# convert am/pm to 24 hour clock
                    if ap is not None:
if ap.lower() == u" pm" and hr != 12:
hr += 12
elif ap.lower() == u" am" and hr == 12:
hr -= 12
except:
hr = 0
m = 0
else:
hr = 0
m = 0
if hr < 0 or hr > 23 or m < 0 or m > 59:
hr = 0
m = 0
te = datetime.datetime.fromordinal(helpers.tryInt(item['airdate']))
foreign_timezone = network_timezones.get_network_timezone(item['network'], network_dict, sb_timezone)
foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m,tzinfo=foreign_timezone)
sql_results[index]['localtime'] = foreign_naive.astimezone(sb_timezone)
#Normalize/Format the Airing Time
try:
                locale.setlocale(locale.LC_TIME, 'en_US')
                sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
                locale.setlocale(locale.LC_ALL, '') # Resetting to the default locale
except:
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(file="comingEpisodes.tmpl")
# paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' }
# paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused'
paused_item = { 'title': 'View Paused:', 'path': {'': ''} }
paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else {'Show': 'toggleComingEpsDisplayPaused'}
t.submenu = [
{ 'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date',
'Show': 'setComingEpsSort/?sort=show',
'Network': 'setComingEpsSort/?sort=network',
}},
{ 'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner',
'Poster': 'setComingEpsLayout/?layout=poster',
'List': 'setComingEpsLayout/?layout=list',
}},
paused_item,
]
t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=sb_timezone))
t.today = datetime.datetime.now().replace(tzinfo=sb_timezone)
t.sql_results = sql_results
# Allow local overriding of layout parameter
if layout and layout in ('poster', 'banner', 'list'):
t.layout = layout
else:
t.layout = sickbeard.COMING_EPS_LAYOUT
return _munge(t)
# Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito).
#
    # iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5545>
# Works with iCloud, Google Calendar and Outlook.
@cherrypy.expose
    def calendar(self):
        """ Provides a subscribable URL for iCal subscriptions
"""
logger.log(u"Receiving iCal request from %s" % cherrypy.request.remote.ip)
poster_url = cherrypy.url().replace('ical', '')
        time_re = re.compile(r'([0-9]{1,2}):([0-9]{2})( |)([APap][Mm])')
# Create a iCal string
ical = 'BEGIN:VCALENDAR\n'
ical += 'VERSION:2.0\n'
ical += 'PRODID://Sick-Beard Upcoming Episodes//\n'
# Get shows info
myDB = db.DBConnection()
# Limit dates
past_date = (datetime.date.today() + datetime.timedelta(weeks=-2)).toordinal()
future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal()
# Get all the shows that are not paused and are currently on air (from kjoconnor Fork)
calendar_shows = myDB.select("SELECT show_name, tvdb_id, network, airs, runtime FROM tv_shows WHERE status = 'Continuing' AND paused != '1'")
for show in calendar_shows:
# Get all episodes of this show airing between today and next month
episode_list = myDB.select("SELECT tvdbid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?", (past_date, future_date, int(show["tvdb_id"])))
# Get local timezone and load network timezones
local_zone = tz.tzlocal()
try:
network_zone = network_timezones.get_network_timezone(show['network'], network_timezones.load_network_dict(), local_zone)
except:
# Dummy network_zone for exceptions
network_zone = None
for episode in episode_list:
# Get the air date and time
air_date = datetime.datetime.fromordinal(int(episode['airdate']))
                air_time = time_re.search(show["airs"])
# Parse out the air time
try:
if (air_time.group(4).lower() == 'pm' and int(air_time.group(1)) == 12):
t = datetime.time(12, int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'pm'):
t = datetime.time((int(air_time.group(1)) + 12), int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'am' and int(air_time.group(1)) == 12):
t = datetime.time(0, int(air_time.group(2)), 0, tzinfo=network_zone)
else:
t = datetime.time(int(air_time.group(1)), int(air_time.group(2)), 0, tzinfo=network_zone)
except:
# Dummy time for exceptions
t = datetime.time(22, 0, 0, tzinfo=network_zone)
# Combine air time and air date into one datetime object
air_date_time = datetime.datetime.combine(air_date, t).astimezone(local_zone)
# Create event for episode
ical = ical + 'BEGIN:VEVENT\n'
ical = ical + 'DTSTART:' + str(air_date_time.date()).replace("-", "") + '\n'
ical = ical + 'SUMMARY:' + show['show_name'] + ': ' + episode['name'] + '\n'
ical = ical + 'UID:' + str(datetime.date.today().isoformat()) + '-' + str(random.randint(10000,99999)) + '@Sick-Beard\n'
if (episode['description'] != ''):
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\\n\\n' + episode['description'] + '\n'
else:
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\n'
ical = ical + 'LOCATION:' + 'Episode ' + str(episode['episode']) + ' - Season ' + str(episode['season']) + '\n'
ical = ical + 'END:VEVENT\n'
# Ending the iCal
ical += 'END:VCALENDAR\n'
return ical
manage = Manage()
history = History()
config = Config()
home = Home()
api = Api()
browser = browser.WebFileBrowser()
errorlogs = ErrorLogs()
ui = UI()
|
nomaro/SickBeard_Backup
|
sickbeard/webserve.py
|
Python
|
gpl-3.0
| 155,575
| 0.006421
|
# coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <fabian.barkhau@gmail.com>
# License: MIT (see LICENSE file)
from kivy.uix.label import Label
from gravur.utils import load_widget
@load_widget
class LabelBox(Label):
pass
|
F483/gravur
|
gravur/common/labelbox.py
|
Python
|
mit
| 232
| 0
|
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class SavedActionApproval(BaseType):
_soap_tag = 'saved_action_approval'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'id': int,
'name': str,
'approved_flag': int},
complex_properties={'metadata': MetadataList},
list_properties={},
)
self.id = None
self.name = None
self.approved_flag = None
self.metadata = None
from metadata_list import MetadataList
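# A minimal usage sketch, not part of the generated console.wsdl bindings: it
# only sets the simple properties declared in __init__ above; 'metadata' would
# normally hold a MetadataList built elsewhere. The values are illustrative.
def _example_saved_action_approval():
    approval = SavedActionApproval()
    approval.id = 42
    approval.name = 'example approval'
    approval.approved_flag = 1
    return approval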
|
tanium/pytan
|
lib/taniumpy/object_types/saved_action_approval.py
|
Python
|
mit
| 654
| 0.009174
|
import re
import logging
from indra.resources import load_resource_json
logger = logging.getLogger(__name__)
identifiers_url = 'https://identifiers.org'
# These are just special cases of name spaces where the mapping from INDRA to
# identifiers.org is not a question of simple capitalization.
identifiers_mappings = {
'UP': 'uniprot',
'UPPRO': 'uniprot.chain',
'UPISO': 'uniprot.isoform',
'REFSEQ_PROT': 'refseq',
'PF': 'pfam',
'IP': 'interpro',
'ECCODE': 'ec-code',
'NONCODE': 'noncodev4.rna',
'LNCRNADB': 'rnacentral',
'MIRBASEM': 'mirbase.mature',
'EGID': 'ncbigene',
    'NCBI': 'ncbigene',
'HGNC_GROUP': 'hgnc.genefamily',
'LINCS': 'lincs.smallmolecule',
'PUBCHEM': 'pubchem.compound',
'CHEMBL': 'chembl.compound',
'CTD': 'ctd.chemical',
'CVCL': 'cellosaurus',
}
# These are namespaces used by INDRA that don't have corresponding
# identifiers.org entries
non_registry = {
'SDIS', 'SCHEM', 'SFAM', 'SCOMP', 'SIGNOR', 'HMS-LINCS', 'NXPFA',
'OMIM', 'LSPCI', 'UPLOC', 'BFO', 'CCLE'
}
# These are namespaces that can appear in db_refs but are actually not
# representing grounding.
non_grounding = {
'TEXT', 'TEXT_NORM'
}
# These are reverse mappings from identifiers.org namespaces to INDRA
# namespaces
identifiers_reverse = {
v: k for k, v in identifiers_mappings.items()
}
# We have to patch this one because it is ambiguous
identifiers_reverse['ncbigene'] = 'EGID'
# These are only the URLs that are strictly prefixes and not more complicated
# patterns. This is because some downstream code uses these as prefixes
# rather than arbitrary patterns.
url_prefixes = {
# Biology namespaces
'NXPFA': 'https://www.nextprot.org/term/FA-',
'SIGNOR': 'https://signor.uniroma2.it/relation_result.php?id=',
'LSPCI': 'https://labsyspharm.github.io/lspci/',
# WM namespaces
'UN': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'WDI': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'FAO': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'HUME': ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies'
'/hume_ontology/'),
'CWMS': 'http://trips.ihmc.us/',
'SOFIA': 'http://cs.cmu.edu/sofia/',
}
def get_ns_from_identifiers(identifiers_ns):
    """Return a namespace compatible with INDRA from an identifiers namespace.
For example, this function can be used to map 'uniprot' to 'UP'.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
Returns
-------
str or None
The namespace compatible with INDRA's internal representation or
None if the given namespace isn't an identifiers.org standard.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
if not reg_entry:
return None
mapping = identifiers_reverse.get(identifiers_ns.lower())
if mapping:
return mapping
else:
return identifiers_ns.upper()
def get_ns_id_from_identifiers(identifiers_ns, identifiers_id):
"""Return a namespace/ID pair compatible with INDRA from identifiers.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
identifiers_id : str
An identifiers.org standard ID in the given namespace.
Returns
-------
(str, str)
A namespace and ID that are valid in INDRA db_refs.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
db_ns = get_ns_from_identifiers(identifiers_ns)
if db_ns is None:
return None, None
db_id = identifiers_id
if reg_entry['namespace_embedded']:
if not identifiers_id.startswith(identifiers_ns.upper()):
db_id = '%s:%s' % (identifiers_ns.upper(), identifiers_id)
return db_ns, db_id
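# A minimal sketch, not part of the original module, assuming the loaded
# identifiers registry has a 'uniprot' entry that is not namespace-embedded:
# the identifiers.org pair maps back to INDRA-style db_refs as ('UP', 'P12345').
def _example_ns_id_from_identifiers():
    return get_ns_id_from_identifiers('uniprot', 'P12345')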
def get_identifiers_ns(db_name):
"""Map an INDRA namespace to an identifiers.org namespace when possible.
Example: this can be used to map 'UP' to 'uniprot'.
Parameters
----------
db_name : str
An INDRA namespace to map to identifiers.org
Returns
-------
str or None
An identifiers.org namespace or None if not available.
"""
mapped_db_name = identifiers_mappings.get(db_name, db_name.lower())
if mapped_db_name not in identifiers_registry:
return None
return mapped_db_name
def get_url_prefix(db_name):
"""Return the URL prefix for a given namespace."""
identifiers_ns = get_identifiers_ns(db_name)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if not identifiers_entry['namespace_embedded']:
return '%s/%s:' % (identifiers_url, identifiers_ns.lower())
else:
return '%s/' % identifiers_url
else:
if db_name in url_prefixes:
return url_prefixes[db_name]
return None
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
# This is the case where we have a prefix that we can simply attach the
# db_id to to get the desired URL.
if db_name == 'CHEMBL':
db_id = ensure_chembl_prefix(db_id)
elif db_name == 'CHEBI':
db_id = ensure_chebi_prefix(db_id)
prefix = get_url_prefix(db_name)
if prefix:
return '%s%s' % (prefix, db_id)
# Otherwise, we have to handle some special cases
bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
if db_name == 'LINCS':
if db_id.startswith('LSM-'): # Lincs Small Molecule ID
url = identifiers_url + '/lincs.smallmolecule:%s' % db_id
elif db_id.startswith('LCL-'): # Lincs Cell Line ID
url = identifiers_url + '/lincs.cell:%s' % db_id
else: # Assume LINCS Protein
url = identifiers_url + '/lincs.protein:%s' % db_id
elif db_name == 'CHEMBL':
if not db_id.startswith('CHEMBL'):
db_id = 'CHEMBL%s' % db_id
url = identifiers_url + '/chembl.compound:%s' % db_id
elif db_name == 'HMS-LINCS':
url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
# Special cases with no identifiers entry
elif db_name == 'SCHEM':
url = bel_scai_url + 'selventa-legacy-chemicals/' + \
'selventa-legacy-chemicals-20150601.belns'
elif db_name == 'SCOMP':
url = bel_scai_url + 'selventa-named-complexes/' + \
'selventa-named-complexes-20150601.belns'
elif db_name == 'SFAM':
url = bel_scai_url + 'selventa-protein-families/' + \
'selventa-protein-families-20150601.belns'
elif db_name == 'TEXT' or db_name == 'TEXT_NORM':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
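# A minimal sketch, not part of the original module: for a namespace with a
# plain identifiers.org prefix such as UP, the URL is simply prefix + ID,
# e.g. 'https://identifiers.org/uniprot:P12345', assuming the registry marks
# 'uniprot' as not namespace-embedded.
def _example_identifiers_url():
    return get_identifiers_url('UP', 'P12345')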
def parse_identifiers_url(url):
"""Retrieve database name and ID given the URL.
Parameters
----------
url : str
An identifiers.org URL to parse.
Returns
-------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc. corresponding to the
given URL.
db_id : str
An identifier in the database.
"""
# Try matching by string pattern
db_ns, db_id = None, None
url_pattern = \
r'(?:https?)://identifiers.org/([A-Za-z0-9.-]+)(/|:)([A-Za-z0-9:_.-]+)'
match = re.match(url_pattern, url)
if match is not None:
g = match.groups()
if len(g) == 3:
pattern_ns, pattern_id = g[0], g[2]
db_ns, db_id = get_ns_id_from_identifiers(pattern_ns, pattern_id)
if db_ns == 'HGNC':
if db_id.startswith('HGNC:'):
db_id = db_id[5:]
# If we got UP and UPPRO, return UPPRO
if db_ns == 'UP' and '#PRO_' in url:
db_ns = 'UPPRO'
db_id = url[url.find('PRO_'):]
if db_ns and db_id:
return db_ns, db_id
for ns, prefix in url_prefixes.items():
if url.startswith(prefix):
return ns, url[len(prefix):]
# Handle other special cases
for part in ['/lincs.smallmolecule', '/lincs.cell', '/lincs.protein']:
if part in url:
return 'LINCS', url[(url.find(part) + len(part) + 1):]
if '/chembl.compound' in url:
return 'CHEMBL', url[
(url.find('/chembl.compound') + len('/chembl.compound:')):]
if 'lincs.hms.harvard.edu' in url:
return 'HMS-LINCS', url[len('http://lincs.hms.harvard.edu/db/sm/'):-4]
if 'selventa-legacy-chemicals/' in url:
return 'SCHEM', None
if 'selventa-named-complexes/' in url:
return 'SCOMP', None
if 'selventa-protein-families/' in url:
return 'SFAM', None
else:
logger.warning('Could not parse URL %s' % url)
return None, None
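# A minimal sketch, not part of the original module: parsing a plain
# identifiers.org URL inverts get_identifiers_url for simple cases, giving
# ('UP', 'P12345') here, again assuming the registry contains 'uniprot'.
def _example_parse_identifiers_url():
    return parse_identifiers_url('https://identifiers.org/uniprot:P12345')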
def namespace_embedded(db_ns: str) -> bool:
"""Return true if this namespace requires IDs to have namespace embedded.
This function first maps the given namespace to an identifiers.org
namespace and then checks the registry to see if namespaces need
to be embedded in IDs. If yes, it returns True. If not, or the ID can't
    be mapped to identifiers.org, it returns False.
Parameters
----------
db_ns :
The namespace to check.
Returns
-------
:
True if the namespace is known to be embedded in IDs of this
namespace. False if unknown or known not to be embedded.
"""
identifiers_ns = get_identifiers_ns(db_ns)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if identifiers_entry['namespace_embedded']:
return True
return False
def ensure_prefix_if_needed(db_ns: str, db_id: str) -> str:
"""Return an ID ensuring a namespace prefix if known to be needed.
Parameters
----------
db_ns :
The namespace associated with the identifier.
db_id :
The original identifier.
Returns
-------
:
The identifier with namespace embedded if needed.
"""
if namespace_embedded(db_ns):
return ensure_prefix(db_ns, db_id)
return db_id
def ensure_prefix(db_ns, db_id, with_colon=True):
"""Return a valid ID that has the given namespace embedded.
This is useful for namespaces such as CHEBI, GO or BTO that require
the namespace to be part of the ID. Note that this function always
ensures that the given db_ns is embedded in the ID and can handle the
    case when it's already present.
Parameters
----------
db_ns : str
A namespace.
db_id : str
An ID within that namespace which should have the namespace
as a prefix in it.
with_colon: Optional[bool]
If True, the namespace prefix is followed by a colon in the ID (e.g.,
CHEBI:12345). Otherwise, no colon is added (e.g., CHEMBL1234).
Default: True
"""
if db_id is None:
return None
colon = ':' if with_colon else ''
if not db_id.startswith(f'{db_ns}{colon}'):
return f'{db_ns}{colon}{db_id}'
return db_id
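# A minimal sketch, not part of the original module, of the two prefix styles
# described in the docstring above: CHEBI uses a colon, CHEMBL does not, and
# an already-prefixed ID passes through unchanged.
def _example_ensure_prefix():
    assert ensure_prefix('CHEBI', '12345') == 'CHEBI:12345'
    assert ensure_prefix('CHEMBL', '1234', with_colon=False) == 'CHEMBL1234'
    assert ensure_prefix('CHEBI', 'CHEBI:12345') == 'CHEBI:12345'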
def ensure_chebi_prefix(chebi_id):
"""Return a valid CHEBI ID that has the appropriate CHEBI: prefix."""
return ensure_prefix('CHEBI', chebi_id)
def ensure_chembl_prefix(chembl_id):
"""Return a valid CHEMBL ID that has the appropriate CHEMBL prefix."""
return ensure_prefix('CHEMBL', chembl_id, with_colon=False)
identifiers_registry = load_resource_json('identifiers_patterns.json')
|
johnbachman/indra
|
indra/databases/identifiers.py
|
Python
|
bsd-2-clause
| 11,854
| 0
|
if x() and y() and z():
a()
else:
b()
|
bronikkk/tirpan
|
tests/test_mir04.py
|
Python
|
gpl-3.0
| 46
| 0
|
def action_comment_load_more(context, action, entity_type, entity_id, last_id, parent_id, **args):
try:
entity = IN.entitier.load_single(entity_type, int(entity_id))
if not entity:
return
output = Object()
db = IN.db
connection = db.connection
container_id = IN.commenter.get_container_id(entity)
# TODO: paging
# get total
total = 0
limit = 10
cursor = db.select({
'table' : 'entity.comment',
'columns' : ['count(id)'],
'where' : [
['container_id', container_id],
['id', '<', int(last_id)], # load previous
['parent_id', parent_id],
['status', 1],
],
}).execute()
if cursor.rowcount >= 0:
total = int(cursor.fetchone()[0])
more_id = '_'.join(('more-commens', entity_type, str(entity_id), str(parent_id)))
if total > 0:
cursor = db.select({
'table' : 'entity.comment',
'columns' : ['id'],
'where' : [
['container_id', container_id],
['id', '<', int(last_id)],
['parent_id', parent_id], # add main level comments only
['status', 1],
],
'order' : {'created' : 'DESC'},
'limit' : limit,
}).execute()
ids = []
last_id = 0
if cursor.rowcount >= 0:
for row in cursor:
ids.append(row['id'])
last_id = ids[-1] # last id
comments = IN.entitier.load_multiple('Comment', ids)
for id, comment in comments.items():
comment.weight = id # keep asc order
output.add(comment)
remaining = total - limit
if remaining > 0 and last_id > 0:
output.add('TextDiv', {
'id' : more_id,
'value' : str(remaining) + ' more comments',
'css' : ['ajax i-text-center i-text-danger pointer'],
'attributes' : {
'data-href' : ''.join(('/comment/more/!Content/', str(entity_id), '/', str(last_id), '/', str(parent_id)))
},
'weight' : -1,
})
#if not output:
#output.add(type = 'TextDiv', data = {})
output = {more_id : output}
context.response = In.core.response.PartialResponse(output = output)
except:
IN.logger.debug()
|
vinoth3v/In
|
In/comment/page/load_more.py
|
Python
|
apache-2.0
| 2,050
| 0.054634
|
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.request import jsonified, getParams
from couchpotato.core.helpers.variable import mergeDicts, md5, getExt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Quality, Profile, ProfileType
from sqlalchemy.sql.expression import or_
import os.path
import re
import time
log = CPLog(__name__)
class QualityPlugin(Plugin):
qualities = [
{'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]},
{'identifier': '1080p', 'hd': True, 'size': (5000, 20000), 'label': '1080P', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts']},
{'identifier': '720p', 'hd': True, 'size': (3500, 10000), 'label': '720P', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts']},
{'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p'], 'ext':['avi']},
{'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': [], 'allow': [], 'ext':['iso', 'img'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts']},
{'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': ['dvdrip'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]},
{'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip'], 'allow': ['dvdr', 'dvd'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': [], 'allow': ['dvdr'], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'ts', 'size': (600, 1000), 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']},
{'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':['avi', 'mpg', 'mpeg']}
]
pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr']
def __init__(self):
addEvent('quality.all', self.all)
addEvent('quality.single', self.single)
addEvent('quality.guess', self.guess)
addEvent('quality.pre_releases', self.preReleases)
addApiView('quality.size.save', self.saveSize)
addApiView('quality.list', self.allView, docs = {
'desc': 'List all available qualities',
'return': {'type': 'object', 'example': """{
'success': True,
'list': array, qualities
}"""}
})
addEvent('app.initialize', self.fill, priority = 10)
def preReleases(self):
return self.pre_releases
def allView(self):
return jsonified({
'success': True,
'list': self.all()
})
def all(self):
db = get_session()
qualities = db.query(Quality).all()
temp = []
for quality in qualities:
q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict())
temp.append(q)
return temp
def single(self, identifier = ''):
db = get_session()
quality_dict = {}
quality = db.query(Quality).filter(or_(Quality.identifier == identifier, Quality.id == identifier)).first()
if quality:
quality_dict = dict(self.getQuality(quality.identifier), **quality.to_dict())
return quality_dict
def getQuality(self, identifier):
for q in self.qualities:
if identifier == q.get('identifier'):
return q
def saveSize(self):
params = getParams()
db = get_session()
quality = db.query(Quality).filter_by(identifier = params.get('identifier')).first()
if quality:
setattr(quality, params.get('value_type'), params.get('value'))
db.commit()
return jsonified({
'success': True
})
def fill(self):
        db = get_session()
order = 0
for q in self.qualities:
# Create quality
qual = db.query(Quality).filter_by(identifier = q.get('identifier')).first()
if not qual:
log.info('Creating quality: %s', q.get('label'))
qual = Quality()
qual.order = order
qual.identifier = q.get('identifier')
qual.label = toUnicode(q.get('label'))
qual.size_min, qual.size_max = q.get('size')
db.add(qual)
# Create single quality profile
prof = db.query(Profile).filter(
Profile.core == True
).filter(
Profile.types.any(quality = qual)
).all()
if not prof:
log.info('Creating profile: %s', q.get('label'))
prof = Profile(
core = True,
label = toUnicode(qual.label),
order = order
)
db.add(prof)
profile_type = ProfileType(
quality = qual,
profile = prof,
finish = True,
order = 0
)
prof.types.append(profile_type)
order += 1
db.commit()
time.sleep(0.3) # Wait a moment
return True
def guess(self, files, extra = {}):
# Create hash for cache
hash = md5(str([f.replace('.' + getExt(f), '') for f in files]))
cached = self.getCache(hash)
        if cached and extra == {}: return cached
for cur_file in files:
size = (os.path.getsize(cur_file) / 1024 / 1024) if os.path.isfile(cur_file) else 0
            words = re.split(r'\W+', cur_file.lower())
for quality in self.all():
# Check tags
if quality['identifier'] in words:
log.debug('Found via identifier "%s" in %s', (quality['identifier'], cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('alternative', [])) & set(words)):
log.debug('Found %s via alt %s in %s', (quality['identifier'], quality.get('alternative'), cur_file))
return self.setCache(hash, quality)
for tag in quality.get('tags', []):
if isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
if list(set(quality.get('tags', [])) & set(words)):
log.debug('Found %s via tag %s in %s', (quality['identifier'], quality.get('tags'), cur_file))
return self.setCache(hash, quality)
# Try again with loose testing
quality = self.guessLoose(hash, extra = extra)
if quality:
return self.setCache(hash, quality)
log.debug('Could not identify quality for: %s', files)
return None
def guessLoose(self, hash, extra):
for quality in self.all():
# Check width resolution, range 20
if (quality.get('width', 720) - 20) <= extra.get('resolution_width', 0) <= (quality.get('width', 720) + 20):
log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width', 720), extra.get('resolution_width', 0)))
return self.setCache(hash, quality)
# Check height resolution, range 20
if (quality.get('height', 480) - 20) <= extra.get('resolution_height', 0) <= (quality.get('height', 480) + 20):
log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height', 480), extra.get('resolution_height', 0)))
return self.setCache(hash, quality)
if 480 <= extra.get('resolution_width', 0) <= 720:
log.debug('Found as dvdrip')
return self.setCache(hash, self.single('dvdrip'))
return None
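# A minimal sketch, not part of the plugin, of the matching step guess() relies
# on: file names are split into lowercase words and compared against the
# quality identifiers/alternatives defined above, without touching the database.
def _example_identifier_word_match():
    words = re.split(r'\W+', 'Some.Movie.2010.720p.BluRay.x264.mkv'.lower())
    return '720p' in words  # True, so guess() would pick the 720p quality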
|
jayme-github/CouchPotatoServer
|
couchpotato/core/plugins/quality/main.py
|
Python
|
gpl-3.0
| 8,674
| 0.007033
|
#coding:utf-8
import numpy as np
from chainer import Variable, FunctionSet
import chainer.functions as F
class LSTM(FunctionSet):
def __init__(self,f_n_units, n_units):
super(LSTM, self).__init__(
l1_x = F.Linear(f_n_units, 4*n_units),
l1_h = F.Linear(n_units, 4*n_units),
l6 = F.Linear(n_units, f_n_units)
)
        # Initialize the parameter values uniformly in the range -0.08 to 0.08
for param in self.parameters:
param[:] = np.random.uniform(-0.08, 0.08, param.shape)
def forward_one_step(self, x_data, y_data, state, train=True,dropout_ratio=0.0):
x ,t = Variable(x_data,volatile=not train),Variable(y_data,volatile=not train)
h1_in = self.l1_x(F.dropout(x, ratio=dropout_ratio, train=train)) + self.l1_h(state['h1'])
c1, h1 = F.lstm(state['c1'], h1_in)
y = self.l6(F.dropout(h1, ratio=dropout_ratio, train=train))
state = {'c1': c1, 'h1': h1}
return state, F.mean_squared_error(y, t)
def predict(self, x_data, y_data, state):
x ,t = Variable(x_data,volatile=False),Variable(y_data,volatile=False)
h1_in = self.l1_x(x) + self.l1_h(state['h1'])
c1, h1 = F.lstm(state['c1'], h1_in)
y = self.l6(h1)
state = {'c1': c1, 'h1': h1}
return state,F.mean_squared_error(y,t)
def make_initial_state(n_units,train = True):
return {name: Variable(np.zeros((1,n_units), dtype=np.float32),
volatile=not train)
for name in ('c1', 'h1')}
#for name in ('c1', 'h1', 'c2', 'h2', 'c3', 'h3','c4','h4','c5','h5')}
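# A minimal usage sketch, not part of the original script, assuming the legacy
# chainer 1.x FunctionSet/Variable API imported above: one forward step on
# dummy zero data with feature size 3 and 4 hidden units.
def _example_forward_step():
    model = LSTM(f_n_units=3, n_units=4)
    state = make_initial_state(4, train=False)
    x = np.zeros((1, 3), dtype=np.float32)
    y = np.zeros((1, 3), dtype=np.float32)
    state, loss = model.forward_one_step(x, y, state, train=False)
    return loss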
|
wbap/Hackathon2015
|
Nishida/WBAI_open_code/lstm/lstm.py
|
Python
|
apache-2.0
| 1,640
| 0.028571
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
    - list - returns the current representation of a registry
required: false
default: False
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
required: 'openshift3/ose-${component}:${version}'
default: None
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry pod. The default is set for 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
- A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary at the location given by key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a#b
            sets d['a']['b'] to item
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
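    # Illustrative return shape from openshift_cmd() on success (values assumed):
    #   {'returncode': 0, 'results': {...parsed JSON...}, 'cmd': 'oc get pods -o json -n default'}
    # On a non-zero return code it also carries 'stdout' and 'stderr', and 'results' is reset to {}.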
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
        with open(inc_file) as inc_fd:
            Utils._write(tmpfile, inc_fd.read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
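    # Illustrative example (values assumed): given {'oc': 'v3.9.0-alpha.3+78ddc10'},
    # add_custom_versions() yields {'oc_numeric': '3.9.0', 'oc_short': '3.9'}.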
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
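        # Note: this relies on the yum Python bindings, so it only works on
        # yum-based hosts (e.g. RHEL/CentOS); elsewhere the import will fail.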
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
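    # Illustrative only: with options like {'images': {'value': 'registry:v3.9', 'include': True}},
    # stringify() returns ['--images=registry:v3.9']; when ascommalist='labels' and the labels
    # value is a dict, that option becomes e.g. '--labels=infra=true,region=primary'.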
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
        ''' return whether a key exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return an environment variable'''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the environment variables'''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
        modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            if idx is not None:
                modified = True
                del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
        '''return volume (or volume mount) information'''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volume_mounts[del_idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
        '''update a volume in the volumes list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
        if update_idx is not None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
        '''update a volume mount in the volume mounts list'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
elif 'hostpath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
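        # Note: per the Kubernetes Secret format, values under 'data' are expected to be
        # base64-encoded strings; this class stores them as given and does not encode them.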
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
    @secrets.setter
    def secrets(self, value):
        '''secret property setter'''
        self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
        '''add portal ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
            volume['secret'] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
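    # Illustrative example (values assumed): for
    #   {'name': 'registry-config', 'type': 'configmap', 'path': '/etc/registry', 'configmap_name': 'reg-cm'}
    # this returns ({'name': 'registry-config', 'configMap': {'name': 'reg-cm'}},
    #               {'mountPath': '/etc/registry', 'name': 'registry-config'}).
    # For 'pvc'/'persistentvolumeclaim' volumes no mount is built, so volume_mount stays None.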
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
            a registry consists of two or more parts
- dc/docker-registry
- svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
        '''delete the registry parts'''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
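        # Illustrative resulting invocation (flags assumed from the options above):
        #   oc adm registry --images=... --replicas=1 --dry-run=True -o json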
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is none, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
# The environment variable for REGISTRY_HTTP_SECRET is autogenerated
# We should set the generated deploymentconfig to the in memory version
# the following modifications will overwrite if needed
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
                        'protocol', # ports.protocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# Default value is false. So, it's safe to not pass an explicit false value to oc versions which
# understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default={}, type='dict'),
edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-
|
DG-i/openshift-ansible
|
roles/lib_openshift/library/oc_adm_registry.py
|
Python
|
apache-2.0
| 94,103
| 0.001551
|
### Copyright (C) 2002-2006 Stephen Kennedy <stevek@gnome.org>
### Copyright (C) 2010-2012 Kai Willadsen <kai.willadsen@gmail.com>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
### USA.
from __future__ import print_function
import atexit
import tempfile
import shutil
import os
import sys
from gettext import gettext as _
import gtk
import pango
from . import melddoc
from . import misc
from . import paths
from . import recent
from . import tree
from . import vc
from .ui import emblemcellrenderer
from .ui import gnomeglade
################################################################################
#
# Local Functions
#
################################################################################
def _commonprefix(files):
if len(files) != 1:
workdir = misc.commonprefix(files)
else:
workdir = os.path.dirname(files[0]) or "."
return workdir
def cleanup_temp():
temp_location = tempfile.gettempdir()
# The strings below will probably end up as debug log, and are deliberately
# not marked for translation.
for f in _temp_files:
try:
assert os.path.exists(f) and os.path.isabs(f) and \
os.path.dirname(f) == temp_location
os.remove(f)
        except Exception:
except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
print("File \"{0}\" not removed due to".format(f), except_str,
file=sys.stderr)
for f in _temp_dirs:
try:
assert os.path.exists(f) and os.path.isabs(f) and \
os.path.dirname(f) == temp_location
shutil.rmtree(f, ignore_errors=1)
        except Exception:
except_str = "{0[0]}: \"{0[1]}\"".format(sys.exc_info())
print("Directory \"{0}\" not removed due to".format(f), except_str,
file=sys.stderr)
_temp_dirs, _temp_files = [], []
atexit.register(cleanup_temp)
################################################################################
#
# CommitDialog
#
################################################################################
class CommitDialog(gnomeglade.Component):
def __init__(self, parent):
gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"), "commitdialog")
self.parent = parent
self.widget.set_transient_for( parent.widget.get_toplevel() )
selected = parent._get_selected_files()
topdir = _commonprefix(selected)
selected = [ s[len(topdir):] for s in selected ]
self.changedfiles.set_text( ("(in %s) "%topdir) + " ".join(selected) )
self.widget.show_all()
def run(self):
self.previousentry.child.set_editable(False)
self.previousentry.set_active(0)
self.textview.grab_focus()
buf = self.textview.get_buffer()
buf.place_cursor( buf.get_start_iter() )
buf.move_mark( buf.get_selection_bound(), buf.get_end_iter() )
response = self.widget.run()
msg = buf.get_text(buf.get_start_iter(), buf.get_end_iter(), 0)
if response == gtk.RESPONSE_OK:
self.parent._command_on_selected( self.parent.vc.commit_command(msg) )
if len(msg.strip()):
self.previousentry.prepend_text(msg)
self.widget.destroy()
def on_previousentry_activate(self, gentry):
buf = self.textview.get_buffer()
buf.set_text( gentry.child.get_text() )
COL_LOCATION, COL_STATUS, COL_REVISION, COL_TAG, COL_OPTIONS, COL_END = \
list(range(tree.COL_END, tree.COL_END+6))
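# Five extra display columns (location, status, revision, tag, options) appended
# after the base DiffTreeStore columns, plus COL_END as the new end sentinel.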
class VcTreeStore(tree.DiffTreeStore):
def __init__(self):
tree.DiffTreeStore.__init__(self, 1, [str] * 5)
################################################################################
# filters
################################################################################
entry_modified = lambda x: (x.state >= tree.STATE_NEW) or (x.isdir and (x.state > tree.STATE_NONE))
entry_normal = lambda x: (x.state == tree.STATE_NORMAL)
entry_nonvc = lambda x: (x.state == tree.STATE_NONE) or (x.isdir and (x.state > tree.STATE_IGNORED))
entry_ignored = lambda x: (x.state == tree.STATE_IGNORED) or x.isdir
################################################################################
#
# VcView
#
################################################################################
class VcView(melddoc.MeldDoc, gnomeglade.Component):
# Map action names to VC commands and required arguments list
action_vc_cmds_map = {
"VcCompare": ("diff_command", ()),
"VcCommit": ("commit_command", ("",)),
"VcUpdate": ("update_command", ()),
"VcAdd": ("add_command", ()),
"VcResolved": ("resolved_command", ()),
"VcRemove": ("remove_command", ()),
"VcRevert": ("revert_command", ()),
}
state_actions = {
"flatten": ("VcFlatten", None),
"modified": ("VcShowModified", entry_modified),
"normal": ("VcShowNormal", entry_normal),
"unknown": ("VcShowNonVC", entry_nonvc),
"ignored": ("VcShowIgnored", entry_ignored),
}
def __init__(self, prefs):
melddoc.MeldDoc.__init__(self, prefs)
gnomeglade.Component.__init__(self, paths.ui_dir("vcview.ui"), "vcview")
actions = (
("VcCompare", gtk.STOCK_DIALOG_INFO, _("_Compare"), None, _("Compare selected"), self.on_button_diff_clicked),
("VcCommit", "vc-commit-24", _("Co_mmit"), None, _("Commit"), self.on_button_commit_clicked),
("VcUpdate", "vc-update-24", _("_Update"), None, _("Update"), self.on_button_update_clicked),
("VcAdd", "vc-add-24", _("_Add"), None, _("Add to VC"), self.on_button_add_clicked),
("VcRemove", "vc-remove-24", _("_Remove"), None, _("Remove from VC"), self.on_button_remove_clicked),
("VcResolved", "vc-resolve-24", _("_Resolved"), None, _("Mark as resolved for VC"), self.on_button_resolved_clicked),
("VcRevert", gtk.STOCK_REVERT_TO_SAVED, None, None, _("Revert to original"), self.on_button_revert_clicked),
("VcDeleteLocally", gtk.STOCK_DELETE, None, None, _("Delete locally"), self.on_button_delete_clicked),
)
toggleactions = (
("VcFlatten", gtk.STOCK_GOTO_BOTTOM, _("_Flatten"), None, _("Flatten directories"), self.on_button_flatten_toggled, False),
("VcShowModified","filter-modified-24", _("_Modified"), None, _("Show modified"), self.on_filter_state_toggled, False),
("VcShowNormal", "filter-normal-24", _("_Normal"), None, _("Show normal"), self.on_filter_state_toggled, False),
("VcShowNonVC", "filter-nonvc-24", _("Non _VC"), None, _("Show unversioned files"), self.on_filter_state_toggled, False),
("VcShowIgnored", "filter-ignored-24", _("Ignored"), None, _("Show ignored files"), self.on_filter_state_toggled, False),
)
self.ui_file = paths.ui_dir("vcview-ui.xml")
self.actiongroup = gtk.ActionGroup('VcviewActions')
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggleactions)
for action in ("VcCompare", "VcFlatten", "VcShowModified",
"VcShowNormal", "VcShowNonVC", "VcShowIgnored"):
self.actiongroup.get_action(action).props.is_important = True
for action in ("VcCommit", "VcUpdate", "VcAdd", "VcRemove",
"VcShowModified", "VcShowNormal", "VcShowNonVC",
"VcShowIgnored", "VcResolved"):
button = self.actiongroup.get_action(action)
button.props.icon_name = button.props.stock_id
self.model = VcTreeStore()
self.widget.connect("style-set", self.model.on_style_set)
self.treeview.set_model(self.model)
selection = self.treeview.get_selection()
selection.set_mode(gtk.SELECTION_MULTIPLE)
selection.connect("changed", self.on_treeview_selection_changed)
self.treeview.set_headers_visible(1)
self.treeview.set_search_equal_func(self.treeview_search_cb)
self.current_path, self.prev_path, self.next_path = None, None, None
column = gtk.TreeViewColumn( _("Name") )
renicon = emblemcellrenderer.EmblemCellRenderer()
rentext = gtk.CellRendererText()
column.pack_start(renicon, expand=0)
column.pack_start(rentext, expand=1)
col_index = self.model.column_index
column.set_attributes(renicon,
icon_name=col_index(tree.COL_ICON, 0),
icon_tint=col_index(tree.COL_TINT, 0))
column.set_attributes(rentext,
text=col_index(tree.COL_TEXT, 0),
foreground_gdk=col_index(tree.COL_FG, 0),
style=col_index(tree.COL_STYLE, 0),
weight=col_index(tree.COL_WEIGHT, 0),
strikethrough=col_index(tree.COL_STRIKE, 0))
self.treeview.append_column(column)
def addCol(name, num):
column = gtk.TreeViewColumn(name)
rentext = gtk.CellRendererText()
column.pack_start(rentext, expand=0)
column.set_attributes(rentext, markup=self.model.column_index(num, 0))
self.treeview.append_column(column)
return column
self.treeview_column_location = addCol( _("Location"), COL_LOCATION)
addCol(_("Status"), COL_STATUS)
addCol(_("Rev"), COL_REVISION)
addCol(_("Tag"), COL_TAG)
addCol(_("Options"), COL_OPTIONS)
self.state_filters = []
for s in self.state_actions:
if s in self.prefs.vc_status_filters:
action_name = self.state_actions[s][0]
self.state_filters.append(s)
self.actiongroup.get_action(action_name).set_active(True)
class ConsoleStream(object):
def __init__(self, textview):
self.textview = textview
b = textview.get_buffer()
self.mark = b.create_mark("END", b.get_end_iter(), 0)
def write(self, s):
if s:
b = self.textview.get_buffer()
b.insert(b.get_end_iter(), s)
self.textview.scroll_mark_onscreen( self.mark )
self.consolestream = ConsoleStream(self.consoleview)
self.location = None
self.treeview_column_location.set_visible(self.actiongroup.get_action("VcFlatten").get_active())
if not self.prefs.vc_console_visible:
self.on_console_view_toggle(self.console_hide_box)
self.vc = None
self.valid_vc_actions = tuple()
# VC ComboBox
self.combobox_vcs = gtk.ComboBox()
self.combobox_vcs.lock = True
self.combobox_vcs.set_model(gtk.ListStore(str, object, bool))
cell = gtk.CellRendererText()
self.combobox_vcs.pack_start(cell, False)
self.combobox_vcs.add_attribute(cell, 'text', 0)
self.combobox_vcs.add_attribute(cell, 'sensitive', 2)
self.combobox_vcs.lock = False
self.hbox2.pack_end(self.combobox_vcs, expand=False)
self.combobox_vcs.show()
self.combobox_vcs.connect("changed", self.on_vc_change)
def on_container_switch_in_event(self, ui):
melddoc.MeldDoc.on_container_switch_in_event(self, ui)
self.scheduler.add_task(self.on_treeview_cursor_changed)
def update_actions_sensitivity(self):
"""Disable actions that use not implemented VC plugin methods
"""
valid_vc_actions = ["VcDeleteLocally"]
for action_name, (meth_name, args) in self.action_vc_cmds_map.items():
action = self.actiongroup.get_action(action_name)
try:
getattr(self.vc, meth_name)(*args)
action.props.sensitive = True
valid_vc_actions.append(action_name)
except NotImplementedError:
action.props.sensitive = False
self.valid_vc_actions = tuple(valid_vc_actions)
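        # Editor's illustrative note (not part of Meld): the probe above relies
        # on each VC plugin raising NotImplementedError from commands it does
        # not support. A hypothetical minimal plugin might look like:
        #
        #     class MinimalVc(object):
        #         def diff_command(self):
        #             return ["diff", "-u"]
        #         def commit_command(self, message):
        #             raise NotImplementedError
        #
        # With such a plugin only "VcCompare" (plus the always-available
        # "VcDeleteLocally") would stay sensitive.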
def choose_vc(self, vcs):
"""Display VC plugin(s) that can handle the location"""
self.combobox_vcs.lock = True
self.combobox_vcs.get_model().clear()
tooltip_texts = [_("Choose one Version Control"),
_("Only one Version Control in this directory")]
default_active = -1
valid_vcs = []
# Try to keep the same VC plugin active on refresh()
for idx, avc in enumerate(vcs):
            # See if the necessary version control command exists. If so,
            # make sure what we're diffing is a valid repository. If either
            # check fails, don't let the user select that version control
            # tool, and display a basic error message in the drop-down menu.
err_str = ""
if vc._vc.call(["which", avc.CMD]):
# TRANSLATORS: this is an error message when a version control
# application isn't installed or can't be found
err_str = _("%s Not Installed" % avc.CMD)
elif not avc.valid_repo():
# TRANSLATORS: this is an error message when a version
# controlled repository is invalid or corrupted
err_str = _("Invalid Repository")
else:
valid_vcs.append(idx)
if (self.vc is not None and
self.vc.__class__ == avc.__class__):
default_active = idx
if err_str:
self.combobox_vcs.get_model().append( \
[_("%s (%s)") % (avc.NAME, err_str), avc, False])
else:
self.combobox_vcs.get_model().append([avc.NAME, avc, True])
if valid_vcs and default_active == -1:
default_active = min(valid_vcs)
self.combobox_vcs.set_tooltip_text(tooltip_texts[len(vcs) == 1])
self.combobox_vcs.set_sensitive(len(vcs) > 1)
self.combobox_vcs.lock = False
self.combobox_vcs.set_active(default_active)
def on_vc_change(self, cb):
if not cb.lock:
self.vc = cb.get_model()[cb.get_active_iter()][1]
self._set_location(self.vc.location)
self.update_actions_sensitivity()
def set_location(self, location):
self.choose_vc(vc.get_vcs(os.path.abspath(location or ".")))
def _set_location(self, location):
self.location = location
self.current_path = None
self.model.clear()
self.fileentry.set_filename(location)
self.fileentry.prepend_history(location)
it = self.model.add_entries( None, [location] )
self.treeview.grab_focus()
self.treeview.get_selection().select_iter(it)
self.model.set_path_state(it, 0, tree.STATE_NORMAL, isdir=1)
self.recompute_label()
self.scheduler.remove_all_tasks()
# If the user is just diffing a file (ie not a directory), there's no
# need to scan the rest of the repository
if os.path.isdir(self.vc.location):
root = self.model.get_iter_root()
self.scheduler.add_task(self._search_recursively_iter(root))
self.scheduler.add_task(self.on_treeview_cursor_changed)
def get_comparison(self):
return recent.TYPE_VC, [self.location]
def recompute_label(self):
self.label_text = os.path.basename(self.location)
# TRANSLATORS: This is the location of the directory the user is diffing
self.tooltip_text = _("%s: %s") % (_("Location"), self.location)
self.label_changed()
def _search_recursively_iter(self, iterstart):
yield _("[%s] Scanning %s") % (self.label_text,"")
rootpath = self.model.get_path( iterstart )
rootname = self.model.value_path( self.model.get_iter(rootpath), 0 )
prefixlen = 1 + len( self.model.value_path( self.model.get_iter_root(), 0 ) )
todo = [ (rootpath, rootname) ]
active_action = lambda a: self.actiongroup.get_action(a).get_active()
filters = [a[1] for a in self.state_actions.values() if \
active_action(a[0]) and a[1]]
def showable(entry):
for f in filters:
if f(entry): return 1
recursive = self.actiongroup.get_action("VcFlatten").get_active()
self.vc.cache_inventory(rootname)
while len(todo):
todo.sort() # depth first
path, name = todo.pop(0)
if path:
it = self.model.get_iter( path )
root = self.model.value_path( it, 0 )
else:
it = self.model.get_iter_root()
root = name
yield _("[%s] Scanning %s") % (self.label_text, root[prefixlen:])
entries = [f for f in self.vc.listdir(root) if showable(f)]
differences = 0
for e in entries:
differences |= (e.state != tree.STATE_NORMAL)
if e.isdir and recursive:
todo.append( (None, e.path) )
continue
child = self.model.add_entries(it, [e.path])
self._update_item_state( child, e, root[prefixlen:] )
if e.isdir:
todo.append( (self.model.get_path(child), None) )
if not recursive: # expand parents
if len(entries) == 0:
self.model.add_empty(it, _("(Empty)"))
if differences or len(path)==1:
self.treeview.expand_to_path(path)
else: # just the root
self.treeview.expand_row( (0,), 0)
self.vc.uncache_inventory()
def on_fileentry_activate(self, fileentry):
path = fileentry.get_full_path()
self.set_location(path)
def on_delete_event(self, appquit=0):
self.scheduler.remove_all_tasks()
return gtk.RESPONSE_OK
def on_row_activated(self, treeview, path, tvc):
it = self.model.get_iter(path)
if self.model.iter_has_child(it):
if self.treeview.row_expanded(path):
self.treeview.collapse_row(path)
else:
self.treeview.expand_row(path,0)
else:
path = self.model.value_path(it, 0)
self.run_diff( [path] )
def run_diff_iter(self, path_list):
silent_error = hasattr(self.vc, 'switch_to_external_diff')
retry_diff = True
while retry_diff:
retry_diff = False
yield _("[%s] Fetching differences") % self.label_text
diffiter = self._command_iter(self.vc.diff_command(), path_list, 0)
diff = None
while type(diff) != type(()):
diff = next(diffiter)
yield 1
prefix, patch = diff[0], diff[1]
try:
patch = self.vc.clean_patch(patch)
except AttributeError:
pass
yield _("[%s] Applying patch") % self.label_text
if patch:
applied = self.show_patch(prefix, patch, silent=silent_error)
if not applied and silent_error:
silent_error = False
self.vc.switch_to_external_diff()
retry_diff = True
else:
for path in path_list:
self.emit("create-diff", [path])
def run_diff(self, path_list):
try:
for path in path_list:
comp_path = self.vc.get_path_for_repo_file(path)
os.chmod(comp_path, 0o444)
_temp_files.append(comp_path)
self.emit("create-diff", [comp_path, path])
except NotImplementedError:
for path in path_list:
self.scheduler.add_task(self.run_diff_iter([path]), atfront=1)
def on_treeview_popup_menu(self, treeview):
time = gtk.get_current_event_time()
self.popup_menu.popup(None, None, None, 0, time)
return True
def on_button_press_event(self, treeview, event):
if event.button == 3:
path = treeview.get_path_at_pos(int(event.x), int(event.y))
if path is None:
return False
selection = treeview.get_selection()
model, rows = selection.get_selected_rows()
if path[0] not in rows:
selection.unselect_all()
selection.select_path(path[0])
treeview.set_cursor(path[0])
self.popup_menu.popup(None, None, None, event.button, event.time)
return True
return False
def on_button_flatten_toggled(self, button):
action = self.actiongroup.get_action("VcFlatten")
self.treeview_column_location.set_visible(action.get_active())
self.on_filter_state_toggled(button)
def on_filter_state_toggled(self, button):
active_action = lambda a: self.actiongroup.get_action(a).get_active()
active_filters = [a for a in self.state_actions if \
active_action(self.state_actions[a][0])]
if set(active_filters) == set(self.state_filters):
return
self.state_filters = active_filters
self.prefs.vc_status_filters = active_filters
self.refresh()
def on_treeview_selection_changed(self, selection):
model, rows = selection.get_selected_rows()
have_selection = bool(rows)
for action in self.valid_vc_actions:
self.actiongroup.get_action(action).set_sensitive(have_selection)
def _get_selected_files(self):
model, rows = self.treeview.get_selection().get_selected_rows()
sel = [self.model.value_path(self.model.get_iter(r), 0) for r in rows]
# Remove empty entries and trailing slashes
return [x[-1] != "/" and x or x[:-1] for x in sel if x is not None]
def _command_iter(self, command, files, refresh):
"""Run 'command' on 'files'. Return a tuple of the directory the
command was executed in and the output of the command.
"""
msg = misc.shelljoin(command)
yield "[%s] %s" % (self.label_text, msg.replace("\n", "\t"))
def relpath(pbase, p):
kill = 0
if len(pbase) and p.startswith(pbase):
kill = len(pbase) + 1
return p[kill:] or "."
if len(files) == 1 and os.path.isdir(files[0]):
workdir = self.vc.get_working_directory(files[0])
else:
workdir = self.vc.get_working_directory( _commonprefix(files) )
files = [ relpath(workdir, f) for f in files ]
r = None
self.consolestream.write( misc.shelljoin(command+files) + " (in %s)\n" % workdir)
readiter = misc.read_pipe_iter(command + files, self.consolestream,
workdir=workdir)
try:
while r is None:
r = next(readiter)
self.consolestream.write(r)
yield 1
except IOError as e:
misc.run_dialog("Error running command.\n'%s'\n\nThe error was:\n%s" % ( misc.shelljoin(command), e),
parent=self, messagetype=gtk.MESSAGE_ERROR)
if refresh:
self.refresh_partial(workdir)
yield workdir, r
def _command(self, command, files, refresh=1):
"""Run 'command' on 'files'.
"""
self.scheduler.add_task(self._command_iter(command, files, refresh))
def _command_on_selected(self, command, refresh=1):
files = self._get_selected_files()
if len(files):
self._command(command, files, refresh)
def on_button_update_clicked(self, obj):
self._command_on_selected(self.vc.update_command())
def on_button_commit_clicked(self, obj):
CommitDialog(self).run()
def on_button_add_clicked(self, obj):
self._command_on_selected(self.vc.add_command())
def on_button_remove_clicked(self, obj):
self._command_on_selected(self.vc.remove_command())
def on_button_resolved_clicked(self, obj):
self._command_on_selected(self.vc.resolved_command())
def on_button_revert_clicked(self, obj):
self._command_on_selected(self.vc.revert_command())
def on_button_delete_clicked(self, obj):
files = self._get_selected_files()
for name in files:
try:
if os.path.isfile(name):
os.remove(name)
elif os.path.isdir(name):
if misc.run_dialog(_("'%s' is a directory.\nRemove recursively?") % os.path.basename(name),
parent = self,
buttonstype=gtk.BUTTONS_OK_CANCEL) == gtk.RESPONSE_OK:
shutil.rmtree(name)
except OSError as e:
misc.run_dialog(_("Error removing %s\n\n%s.") % (name,e), parent = self)
workdir = _commonprefix(files)
self.refresh_partial(workdir)
def on_button_diff_clicked(self, obj):
files = self._get_selected_files()
if len(files):
self.run_diff(files)
def open_external(self):
self._open_files(self._get_selected_files())
def show_patch(self, prefix, patch, silent=False):
if vc._vc.call(["which", "patch"]):
primary = _("Patch tool not found")
secondary = _("Meld needs the <i>patch</i> tool to be installed "
"to perform comparisons in %s repositories. Please "
"install <i>patch</i> and try again.") % self.vc.NAME
msgarea = self.msgarea_mgr.new_from_text_and_icon(
gtk.STOCK_DIALOG_ERROR, primary, secondary)
msgarea.add_button(_("Hi_de"), gtk.RESPONSE_CLOSE)
msgarea.connect("response", lambda *args: self.msgarea_mgr.clear())
msgarea.show_all()
return False
tmpdir = tempfile.mkdtemp("-meld")
_temp_dirs.append(tmpdir)
diffs = []
for fname in self.vc.get_patch_files(patch):
destfile = os.path.join(tmpdir,fname)
destdir = os.path.dirname( destfile )
if not os.path.exists(destdir):
os.makedirs(destdir)
pathtofile = os.path.join(prefix, fname)
try:
shutil.copyfile( pathtofile, destfile)
except IOError: # it is missing, create empty file
open(destfile,"w").close()
diffs.append( (destfile, pathtofile) )
patchcmd = self.vc.patch_command(tmpdir)
try:
result = misc.write_pipe(patchcmd, patch, error=misc.NULL)
except OSError:
result = 1
if result == 0:
for d in diffs:
os.chmod(d[0], 0o444)
self.emit("create-diff", d)
return True
elif not silent:
primary = _("Error fetching original comparison file")
secondary = _("Meld couldn't obtain the original version of your "
"comparison file. If you are using the most recent "
"version of Meld, please report a bug, including as "
"many details as possible.")
msgarea = self.msgarea_mgr.new_from_text_and_icon(
gtk.STOCK_DIALOG_ERROR, primary, secondary)
msgarea.add_button(_("Hi_de"), gtk.RESPONSE_CLOSE)
msgarea.add_button(_("Report a bug"), gtk.RESPONSE_OK)
def patch_error_cb(msgarea, response):
if response == gtk.RESPONSE_OK:
bug_url = "https://bugzilla.gnome.org/enter_bug.cgi?" + \
"product=meld"
misc.open_uri(bug_url)
else:
self.msgarea_mgr.clear()
msgarea.connect("response", patch_error_cb)
msgarea.show_all()
return False
def refresh(self):
self.set_location( self.model.value_path( self.model.get_iter_root(), 0 ) )
def refresh_partial(self, where):
if not self.actiongroup.get_action("VcFlatten").get_active():
it = self.find_iter_by_name( where )
if it:
newiter = self.model.insert_after( None, it)
self.model.set_value(newiter, self.model.column_index( tree.COL_PATH, 0), where)
self.model.set_path_state(newiter, 0, tree.STATE_NORMAL, True)
self.model.remove(it)
self.scheduler.add_task(self._search_recursively_iter(newiter))
else: # XXX fixme
self.refresh()
def _update_item_state(self, it, vcentry, location):
e = vcentry
self.model.set_path_state(it, 0, e.state, e.isdir)
def setcol(col, val):
self.model.set_value(it, self.model.column_index(col, 0), val)
setcol(COL_LOCATION, location)
setcol(COL_STATUS, e.get_status())
setcol(COL_REVISION, e.rev)
setcol(COL_TAG, e.tag)
setcol(COL_OPTIONS, e.options)
def on_file_changed(self, filename):
it = self.find_iter_by_name(filename)
if it:
path = self.model.value_path(it, 0)
self.vc.update_file_state(path)
files = self.vc.lookup_files([], [(os.path.basename(path), path)])[1]
for e in files:
if e.path == path:
prefixlen = 1 + len( self.model.value_path( self.model.get_iter_root(), 0 ) )
self._update_item_state( it, e, e.parent[prefixlen:])
return
def find_iter_by_name(self, name):
it = self.model.get_iter_root()
path = self.model.value_path(it, 0)
while it:
if name == path:
return it
elif name.startswith(path):
child = self.model.iter_children( it )
while child:
path = self.model.value_path(child, 0)
if name == path:
return child
elif name.startswith(path):
break
else:
child = self.model.iter_next( child )
it = child
else:
break
return None
def on_console_view_toggle(self, box, event=None):
if box == self.console_hide_box:
self.prefs.vc_console_visible = 0
self.console_hbox.hide()
self.console_show_box.show()
else:
self.prefs.vc_console_visible = 1
self.console_hbox.show()
self.console_show_box.hide()
def on_consoleview_populate_popup(self, text, menu):
item = gtk.ImageMenuItem(gtk.STOCK_CLEAR)
def activate(*args):
buf = text.get_buffer()
buf.delete( buf.get_start_iter(), buf.get_end_iter() )
item.connect("activate", activate)
item.show()
menu.insert( item, 0 )
item = gtk.SeparatorMenuItem()
item.show()
menu.insert( item, 1 )
def on_treeview_cursor_changed(self, *args):
cursor_path, cursor_col = self.treeview.get_cursor()
if not cursor_path:
self.emit("next-diff-changed", False, False)
self.current_path = cursor_path
return
# If invoked directly rather than through a callback, we always check
if not args:
skip = False
else:
try:
old_cursor = self.model.get_iter(self.current_path)
except (ValueError, TypeError):
# An invalid path gives ValueError; None gives a TypeError
skip = False
else:
# We can skip recalculation if the new cursor is between
# the previous/next bounds, and we weren't on a changed row
state = self.model.get_state(old_cursor, 0)
if state not in (tree.STATE_NORMAL, tree.STATE_EMPTY):
skip = False
else:
if self.prev_path is None and self.next_path is None:
skip = True
elif self.prev_path is None:
skip = cursor_path < self.next_path
elif self.next_path is None:
skip = self.prev_path < cursor_path
else:
skip = self.prev_path < cursor_path < self.next_path
if not skip:
prev, next = self.model._find_next_prev_diff(cursor_path)
self.prev_path, self.next_path = prev, next
have_next_diffs = (prev is not None, next is not None)
self.emit("next-diff-changed", *have_next_diffs)
self.current_path = cursor_path
def next_diff(self, direction):
if direction == gtk.gdk.SCROLL_UP:
path = self.prev_path
else:
path = self.next_path
if path:
self.treeview.expand_to_path(path)
self.treeview.set_cursor(path)
def on_reload_activate(self, *extra):
self.on_fileentry_activate(self.fileentry)
def on_find_activate(self, *extra):
self.treeview.emit("start-interactive-search")
def treeview_search_cb(self, model, column, key, it):
"""Callback function for searching in VcView treeview"""
path = model.get_value(it, tree.COL_PATH)
# if query text contains slash, search in full path
if key.find('/') >= 0:
lineText = path
else:
lineText = os.path.basename(path)
# Perform case-insensitive matching if query text is all lower-case
if key.islower():
lineText = lineText.lower()
if lineText.find(key) >= 0:
# line matches
return False
else:
return True
|
pedrox/meld
|
meld/vcview.py
|
Python
|
gpl-2.0
| 35,046
| 0.005364
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bleeding-edge version of Unicode Character Database.
Provides an interface similar to Python's own unicodedata package, but with
the bleeding-edge data. The implementation is not efficient at all; it is
done this way purely for ease of use. The data comes from a bleeding-edge
version of the Unicode Standard that has not yet been published, so it is
expected to be unstable and sometimes inconsistent.
"""
__author__ = (
"roozbeh@google.com (Roozbeh Pournader) and " "cibu@google.com (Cibu Johny)"
)
import codecs
import collections
import os
from os import path
import re
from nototools.py23 import unichr, unicode, basestring
try:
import unicodedata2 as unicodedata # Unicode 8 compliant native lib
except ImportError:
import unicodedata # Python's internal library
from nototools import tool_utils # parse_int_ranges
# Update this when we update the base version data we use
UNICODE_VERSION = 14.0
_data_is_loaded = False
_property_value_aliases_data = {}
_character_names_data = {}
_general_category_data = {}
_combining_class_data = {}
_decomposition_data = {}
_bidi_mirroring_characters = set()
_script_data = {}
_script_extensions_data = {}
_block_data = {}
_block_range = {}
_block_names = []
_age_data = {}
_bidi_mirroring_glyph_data = {}
_core_properties_data = {}
_indic_positional_data = {}
_indic_syllabic_data = {}
_defined_characters = set()
_script_code_to_long_name = {}
_folded_script_name_to_code = {}
_lower_to_upper_case = {}
# emoji data
_presentation_default_emoji = None
_presentation_default_text = None
_emoji_modifier_base = None
_emoji = None
_emoji_variants = None
_emoji_variants_proposed = None
# non-emoji variant data
_variant_data = None
_variant_data_cps = None
# proposed emoji
_proposed_emoji_data = None
_proposed_emoji_data_cps = None
# emoji sequences
_emoji_sequence_data = None
_emoji_non_vs_to_canonical = None
_emoji_group_data = None
# nameslist/namealiases
_nameslist_see_also = None
_namealiases_alt_names = None
def load_data():
"""Loads the data files needed for the module.
Could be used by processes that care about controlling when the data is
loaded. Otherwise, data will be loaded the first time it's needed.
"""
global _data_is_loaded
if not _data_is_loaded:
_load_property_value_aliases_txt()
_load_unicode_data_txt()
_load_scripts_txt()
_load_script_extensions_txt()
_load_blocks_txt()
_load_derived_age_txt()
_load_derived_core_properties_txt()
_load_bidi_mirroring_txt()
_load_indic_data()
_load_emoji_data()
_load_emoji_sequence_data()
_load_unicode_emoji_variants()
_load_variant_data()
_load_proposed_emoji_data()
_load_nameslist_data()
_load_namealiases_data()
_data_is_loaded = True
def name(char, *args):
"""Returns the name of a character.
Raises a ValueError exception if the character is undefined, unless an
extra argument is given, in which case it will return that argument.
"""
if isinstance(char, int):
char = unichr(char)
# First try and get the name from unidata, which is faster and supports
# CJK and Hangul automatic names
try:
return unicodedata.name(char)
except ValueError as val_error:
cp = ord(char)
load_data()
if cp in _character_names_data:
return _character_names_data[cp]
elif (cp,) in _emoji_sequence_data:
return _emoji_sequence_data[(cp,)][0]
elif args:
return args[0]
else:
raise Exception('no name for "%0x"' % ord(char))
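# Illustrative sketch (editor's addition, not part of the original module):
# how name() behaves with and without the optional fallback argument. The
# values in the comments assume the bundled UCD data files are present.
def _example_name_lookup():
    # Characters known to Python's own unicodedata resolve immediately.
    latin_a = name("A")  # 'LATIN CAPITAL LETTER A'
    # An unassigned code point raises unless a fallback argument is supplied.
    fallback = name(0x0378, "<unassigned>")  # '<unassigned>'
    return latin_a, fallback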
def _char_to_int(char):
"""Converts a potential character to its scalar value."""
if type(char) in [str, type(u"")]:
return ord(char)
else:
return char
def derived_props():
load_data()
return frozenset(_core_properties_data.keys())
def chars_with_property(propname):
load_data()
return frozenset(_core_properties_data[propname])
def category(char):
"""Returns the general category of a character."""
load_data()
char = _char_to_int(char)
try:
return _general_category_data[char]
except KeyError:
return "Cn" # Unassigned
def combining(char):
"""Returns the canonical combining class of a character."""
load_data()
char = _char_to_int(char)
try:
return _combining_class_data[char]
except KeyError:
return 0
def to_upper(char):
"""Returns the upper case for a lower case character.
This is not full upper casing, but simply reflects the 1-1
mapping in UnicodeData.txt."""
load_data()
cp = _char_to_int(char)
try:
if _general_category_data[cp] == "Ll":
return unichr(_lower_to_upper_case[cp])
except KeyError:
pass
return char
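# Illustrative sketch (editor's addition): to_upper() only applies the simple
# one-to-one mapping from UnicodeData.txt, so characters without such a
# mapping come back unchanged.
def _example_to_upper():
    simple = to_upper("a")  # 'A' via the simple uppercase mapping
    unchanged = to_upper(u"\u00df")  # U+00DF has no one-to-one mapping
    return simple, unchanged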
def canonical_decomposition(char):
"""Returns the canonical decomposition of a character as a Unicode string."""
load_data()
char = _char_to_int(char)
try:
return _decomposition_data[char]
except KeyError:
return u""
def script(char):
"""Returns the script property of a character as a four-letter code."""
load_data()
char = _char_to_int(char)
try:
return _script_data[char]
except KeyError:
return "Zzzz" # Unknown
def script_extensions(char):
"""Returns the script extensions property of a character.
The return value is a frozenset of four-letter script codes.
"""
load_data()
char = _char_to_int(char)
try:
return _script_extensions_data[char]
except KeyError:
return frozenset([script(char)])
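# Illustrative sketch (editor's addition): script() returns a single
# four-letter code, while script_extensions() falls back to that code when a
# character has no explicit Script_Extensions entry.
def _example_script_lookup():
    latn = script("A")  # 'Latn'
    exts = script_extensions("A")  # typically frozenset(['Latn']) via the fallback
    unknown = script(0x0378)  # 'Zzzz' for an unassigned code point
    return latn, exts, unknown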
def block(char):
"""Returns the block property of a character."""
load_data()
char = _char_to_int(char)
try:
return _block_data[char]
except KeyError:
return "No_Block"
def block_range(block):
"""Returns a range (first, last) of the named block."""
load_data()
return _block_range[block]
def block_chars(block):
"""Returns a frozenset of the cps in the named block."""
load_data()
first, last = _block_range[block]
return frozenset(range(first, last + 1))
def block_names():
"""Returns the names of the blocks in block order."""
load_data()
return _block_names[:]
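# Illustrative sketch (editor's addition): the block helpers expose Blocks.txt
# by name, range, and member code points.
def _example_block_lookup():
    first, last = block_range("Basic Latin")  # (0x0000, 0x007F)
    members = block_chars("Basic Latin")  # frozenset of the 128 code points
    return first, last, len(members)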
def age(char):
"""Returns the age property of a character as a string.
Returns None if the character is unassigned."""
load_data()
char = _char_to_int(char)
try:
return _age_data[char]
except KeyError:
return None
# Uniscribe treats these ignorables (Hangul fillers) as spacing.
UNISCRIBE_USED_IGNORABLES = frozenset([0x115F, 0x1160, 0x3164, 0xFFA0])
def is_default_ignorable(char):
"""Returns true if the character has the Default_Ignorable property."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _core_properties_data["Default_Ignorable_Code_Point"]
def default_ignorables():
load_data()
return frozenset(_core_properties_data["Default_Ignorable_Code_Point"])
def is_defined(char):
"""Returns true if the character is defined in the Unicode Standard."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _defined_characters
def is_private_use(char):
"""Returns true if the characters is a private use character."""
return category(char) == "Co"
def mirrored(char):
"""Returns 1 if the characters is bidi mirroring, 0 otherwise."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return int(char in _bidi_mirroring_characters)
def bidi_mirroring_glyph(char):
"""Returns the bidi mirroring glyph property of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _bidi_mirroring_glyph_data[char]
except KeyError:
return None
def mirrored_chars():
return frozenset(_bidi_mirroring_glyph_data.keys())
def indic_positional_category(char):
"""Returns the Indic positional category of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _indic_positional_data[char]
except KeyError:
return "NA"
def indic_syllabic_category(char):
"""Returns the Indic syllabic category of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
        return _indic_syllabic_data[char]
except KeyError:
return "Other"
def create_script_to_chars():
"""Returns a mapping from script to defined characters, based on script and
extensions, for all scripts."""
load_data()
result = collections.defaultdict(set)
for cp in _defined_characters:
if cp in _script_data:
result[_script_data[cp]].add(cp)
if cp in _script_extensions_data:
for script in _script_extensions_data[cp]:
result[script].add(cp)
return result
_DEFINED_CHARACTERS_CACHE = {}
def defined_characters(version=None, scr=None):
"""Returns the set of all defined characters in the Unicode Standard."""
load_data()
    # Handle the common error where version is passed as a string; without the
    # float() conversion the age test below would always pass.
if version is not None:
version = float(version)
try:
return _DEFINED_CHARACTERS_CACHE[(version, scr)]
except KeyError:
pass
characters = _defined_characters
if version is not None:
characters = {
char
for char in characters
if age(char) is not None and float(age(char)) <= version
}
if scr is not None:
characters = {
char
for char in characters
if script(char) == scr or scr in script_extensions(char)
}
characters = frozenset(characters)
_DEFINED_CHARACTERS_CACHE[(version, scr)] = characters
return characters
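# Illustrative sketch (editor's addition): narrowing the defined set by Unicode
# version and script; the float() conversion above means a version passed as a
# string is still handled correctly.
def _example_defined_characters():
    all_defined = defined_characters()
    # Characters assigned by Unicode 6.0 whose script or extensions include Latin.
    old_latin = defined_characters(version=6.0, scr="Latn")
    return len(all_defined), len(old_latin)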
_strip_re = re.compile(r"[-'_ ]+")
def _folded_script_name(script_name):
"""Folds a script name to its bare bones for comparison."""
    # str.translate no longer takes the two-parameter (table, deletechars)
    # form for unicode strings, so script_name.translate(None, "'-_ ") cannot
    # be used here.
return _strip_re.sub("", script_name).lower()
def script_code(script_name):
"""Returns the four-letter ISO 15924 code of a script from its long name."""
load_data()
folded_script_name = _folded_script_name(script_name)
try:
return _HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE[folded_script_name]
    except KeyError:
return _folded_script_name_to_code.get(folded_script_name, "Zzzz")
# We use some standard script codes that are not assigned to a codepoint
# by Unicode, e.g. Zsym. The data based on Scripts.txt doesn't contain
# these, so we add them here. There are also a few names with punctuation
# that we special-case.
_HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES = {
"Aran": "Nastaliq", # not assigned
"Nkoo": "N'Ko",
"Phag": "Phags-pa",
"Piqd": "Klingon", # not assigned
"Zmth": "Math", # not assigned
"Zsye": "Emoji", # not assigned
"Zsym": "Symbols", # not assigned
}
_HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE = {
_folded_script_name(name): code
for code, name in _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES.items()
}
def human_readable_script_name(code):
"""Returns a human-readable name for the script code."""
try:
return _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES[code]
except KeyError:
load_data()
return _script_code_to_long_name[code]
def all_scripts():
"""Return a frozenset of all four-letter script codes."""
load_data()
return frozenset(_script_code_to_long_name.keys())
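# Illustrative sketch (editor's addition): mapping between long script names
# and ISO 15924 codes, including the hard-coded special cases above.
def _example_script_names():
    latn = script_code("Latin")  # 'Latn'
    nko = script_code("N'Ko")  # 'Nkoo' via the hard-coded names
    emoji_name = human_readable_script_name("Zsye")  # 'Emoji'
    return latn, nko, emoji_name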
_DATA_DIR_PATH = path.join(
path.abspath(path.dirname(__file__)), os.pardir, "third_party", "ucd"
)
def open_unicode_data_file(data_file_name):
"""Opens a Unicode data file.
Args:
data_file_name: A string containing the filename of the data file.
Returns:
A file handle to the data file.
"""
filename = path.join(_DATA_DIR_PATH, data_file_name)
return codecs.open(filename, "r", "utf-8")
def _parse_code_ranges(input_data):
"""Reads Unicode code ranges with properties from an input string.
Reads a Unicode data file already imported into a string. The format is
the typical Unicode data file format with either one character or a
range of characters separated by a semicolon with a property value (and
potentially comments after a number sign, that will be ignored).
Example source data file:
http://www.unicode.org/Public/UNIDATA/Scripts.txt
Example data:
0000..001F ; Common # Cc [32] <control-0000>..<control-001F>
0020 ; Common # Zs SPACE
Args:
input_data: An input string, containing the data.
Returns:
A list of tuples corresponding to the input data, with each tuple
containing the beginning of the range, the end of the range, and the
property value for the range. For example:
[(0, 31, 'Common'), (32, 32, 'Common')]
"""
ranges = []
line_regex = re.compile(
r"^"
r"([0-9A-F]{4,6})" # first character code
r"(?:\.\.([0-9A-F]{4,6}))?" # optional second character code
r"\s*;\s*"
r"([^#]+)"
) # the data, up until the potential comment
for line in input_data.split("\n"):
match = line_regex.match(line)
if not match:
continue
first, last, data = match.groups()
if last is None:
last = first
first = int(first, 16)
last = int(last, 16)
data = data.rstrip()
ranges.append((first, last, data))
return ranges
def _parse_semicolon_separated_data(input_data):
"""Reads semicolon-separated Unicode data from an input string.
Reads a Unicode data file already imported into a string. The format is
the Unicode data file format with a list of values separated by
    semicolons. The number of values may differ from one line to another.
Example source data file:
http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt
Example data:
sc; Cher ; Cherokee
sc; Copt ; Coptic ; Qaac
Args:
input_data: An input string, containing the data.
Returns:
A list of lists corresponding to the input data, with each individual
list containing the values as strings. For example:
[['sc', 'Cher', 'Cherokee'], ['sc', 'Copt', 'Coptic', 'Qaac']]
"""
all_data = []
for line in input_data.split("\n"):
line = line.split("#", 1)[0].strip() # remove the comment
if not line:
continue
fields = line.split(";")
fields = [field.strip() for field in fields]
all_data.append(fields)
return all_data
def _load_unicode_data_txt():
"""Load character data from UnicodeData.txt."""
global _defined_characters
global _bidi_mirroring_characters
if _defined_characters:
return
with open_unicode_data_file("UnicodeData.txt") as unicode_data_txt:
unicode_data = _parse_semicolon_separated_data(unicode_data_txt.read())
for line in unicode_data:
code = int(line[0], 16)
char_name = line[1]
general_category = line[2]
combining_class = int(line[3])
decomposition = line[5]
if decomposition.startswith("<"):
# We only care about canonical decompositions
decomposition = ""
decomposition = decomposition.split()
decomposition = [unichr(int(char, 16)) for char in decomposition]
decomposition = "".join(decomposition)
bidi_mirroring = line[9] == "Y"
if general_category == "Ll":
upcode = line[12]
if upcode:
upper_case = int(upcode, 16)
_lower_to_upper_case[code] = upper_case
if char_name.endswith("First>"):
last_range_opener = code
elif char_name.endswith("Last>"):
# Ignore surrogates
if "Surrogate" not in char_name:
for char in range(last_range_opener, code + 1):
_general_category_data[char] = general_category
_combining_class_data[char] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(char)
_defined_characters.add(char)
else:
_character_names_data[code] = char_name
_general_category_data[code] = general_category
_combining_class_data[code] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(code)
_decomposition_data[code] = decomposition
_defined_characters.add(code)
_defined_characters = frozenset(_defined_characters)
_bidi_mirroring_characters = frozenset(_bidi_mirroring_characters)
def _load_scripts_txt():
"""Load script property from Scripts.txt."""
with open_unicode_data_file("Scripts.txt") as scripts_txt:
script_ranges = _parse_code_ranges(scripts_txt.read())
for first, last, script_name in script_ranges:
folded_script_name = _folded_script_name(script_name)
script = _folded_script_name_to_code[folded_script_name]
for char_code in range(first, last + 1):
_script_data[char_code] = script
def _load_script_extensions_txt():
"""Load script property from ScriptExtensions.txt."""
with open_unicode_data_file("ScriptExtensions.txt") as se_txt:
script_extensions_ranges = _parse_code_ranges(se_txt.read())
for first, last, script_names in script_extensions_ranges:
script_set = frozenset(script_names.split(" "))
for character_code in range(first, last + 1):
_script_extensions_data[character_code] = script_set
def _load_blocks_txt():
"""Load block name from Blocks.txt."""
with open_unicode_data_file("Blocks.txt") as blocks_txt:
block_ranges = _parse_code_ranges(blocks_txt.read())
for first, last, block_name in block_ranges:
_block_names.append(block_name)
_block_range[block_name] = (first, last)
for character_code in range(first, last + 1):
_block_data[character_code] = block_name
def _load_derived_age_txt():
"""Load age property from DerivedAge.txt."""
with open_unicode_data_file("DerivedAge.txt") as derived_age_txt:
age_ranges = _parse_code_ranges(derived_age_txt.read())
for first, last, char_age in age_ranges:
for char_code in range(first, last + 1):
_age_data[char_code] = char_age
def _load_derived_core_properties_txt():
"""Load derived core properties from Blocks.txt."""
with open_unicode_data_file("DerivedCoreProperties.txt") as dcp_txt:
dcp_ranges = _parse_code_ranges(dcp_txt.read())
for first, last, property_name in dcp_ranges:
for character_code in range(first, last + 1):
try:
_core_properties_data[property_name].add(character_code)
except KeyError:
_core_properties_data[property_name] = {character_code}
def _load_property_value_aliases_txt():
"""Load property value aliases from PropertyValueAliases.txt."""
with open_unicode_data_file("PropertyValueAliases.txt") as pva_txt:
aliases = _parse_semicolon_separated_data(pva_txt.read())
for data_item in aliases:
if data_item[0] == "sc": # Script
code = data_item[1]
long_name = data_item[2]
_script_code_to_long_name[code] = long_name.replace("_", " ")
folded_name = _folded_script_name(long_name)
_folded_script_name_to_code[folded_name] = code
def _load_bidi_mirroring_txt():
"""Load bidi mirroring glyphs from BidiMirroring.txt."""
with open_unicode_data_file("BidiMirroring.txt") as bidi_mirroring_txt:
bmg_pairs = _parse_semicolon_separated_data(bidi_mirroring_txt.read())
for char, bmg in bmg_pairs:
char = int(char, 16)
bmg = int(bmg, 16)
_bidi_mirroring_glyph_data[char] = bmg
def _load_indic_data():
"""Load Indic properties from Indic(Positional|Syllabic)Category.txt."""
with open_unicode_data_file("IndicPositionalCategory.txt") as inpc_txt:
positional_ranges = _parse_code_ranges(inpc_txt.read())
for first, last, char_position in positional_ranges:
for char_code in range(first, last + 1):
_indic_positional_data[char_code] = char_position
with open_unicode_data_file("IndicSyllabicCategory.txt") as insc_txt:
syllabic_ranges = _parse_code_ranges(insc_txt.read())
for first, last, char_syllabic_category in syllabic_ranges:
for char_code in range(first, last + 1):
_indic_syllabic_data[char_code] = char_syllabic_category
def _load_emoji_data():
"""Parse the new draft format of emoji-data.txt"""
global _presentation_default_emoji, _presentation_default_text
global _emoji, _emoji_modifier_base
if _presentation_default_emoji:
return
emoji_sets = {
"Emoji": set(),
"Emoji_Presentation": set(),
"Emoji_Modifier": set(),
"Emoji_Modifier_Base": set(),
"Extended_Pictographic": set(),
"Emoji_Component": set(),
}
set_names = "|".join(sorted(emoji_sets.keys()))
line_re = re.compile(
r"([0-9A-F]{4,6})(?:\.\.([0-9A-F]{4,6}))?\s*;\s*" r"(%s)\s*#.*$" % set_names
)
with open_unicode_data_file("emoji-data.txt") as f:
for line in f:
line = line.strip()
if not line or line[0] == "#":
continue
m = line_re.match(line)
if not m:
raise ValueError('Did not match "%s"' % line)
start = int(m.group(1), 16)
end = start if not m.group(2) else int(m.group(2), 16)
emoji_set = emoji_sets.get(m.group(3))
emoji_set.update(range(start, end + 1))
# allow our legacy use of handshake and wrestlers with skin tone modifiers
emoji_sets["Emoji_Modifier_Base"] |= {0x1F91D, 0x1F93C}
_presentation_default_emoji = frozenset(emoji_sets["Emoji_Presentation"])
_presentation_default_text = frozenset(
emoji_sets["Emoji"] - emoji_sets["Emoji_Presentation"]
)
_emoji_modifier_base = frozenset(emoji_sets["Emoji_Modifier_Base"])
_emoji = frozenset(emoji_sets["Emoji"])
# we have no real use for the 'Emoji_Regional_Indicator' and
# 'Emoji_Component' sets, and they're not documented, so ignore them.
# The regional indicator set is just the 26 regional indicator
# symbols, and the component set is number sign, asterisk, ASCII digits,
# the regional indicators, and the skin tone modifiers.
PROPOSED_EMOJI_AGE = 1000.0
ZWJ = 0x200D
EMOJI_VS = 0xFE0F
EMOJI_SEQUENCE_TYPES = frozenset(
[
"Basic_Emoji",
"Emoji_Keycap_Sequence",
"Emoji_Combining_Sequence",
"Emoji_Flag_Sequence",
"RGI_Emoji_Flag_Sequence",
"RGI_Emoji_Tag_Sequence",
"Emoji_Modifier_Sequence",
"RGI_Emoji_Modifier_Sequence",
"RGI_Emoji_ZWJ_Sequence",
"Emoji_ZWJ_Sequence",
"Emoji_Single_Sequence",
]
)
# Unicode 12 decided to be 'helpful' and included single emoji in the sequence
# data, but unlike all the other data it represents these as ranges
# (XXXX..XXXX) rather than one per line. We can't get name data for these so
# we can't use that data, but we still have to parse the line.
def _read_emoji_data(lines):
"""Parse lines of emoji data and return a map from sequence to tuples of
name, age, type."""
line_re = re.compile(
r"(?:([0-9A-F ]+)|([0-9A-F]+\.\.[0-9A-F]+)\s*);\s*(%s)\s*;\s*([^#]*)\s*#\s*E?(\d+\.\d+).*"
% "|".join(EMOJI_SEQUENCE_TYPES)
)
result = {}
for line in lines:
line = line.strip()
if not line or line[0] == "#":
continue
m = line_re.match(line)
if not m:
raise ValueError('"%s" Did not match "%s"' % (line_re.pattern, line))
# group 1 is a sequence, group 2 is a range of single character sequences.
# we can't process the range because we don't have a name for each character
# in the range, so skip it and get these emoji and their names from
# UnicodeData
if m.group(2):
continue
seq_type = m.group(3).strip().encode("ascii")
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(4).strip()
age = float(m.group(5))
result[seq] = (name, age, seq_type)
return result
def _read_emoji_data_file(filename):
with open_unicode_data_file(filename) as f:
return _read_emoji_data(f.readlines())
_EMOJI_QUAL_TYPES = [
"component",
"fully-qualified",
"minimally-qualified",
"unqualified",
]
def _read_emoji_test_data(data_string):
"""Parse the emoji-test.txt data. This has names of proposed emoji that are
not yet in the full Unicode data file. Returns a list of tuples of
sequence, group, subgroup, name.
The data is a string."""
line_re = re.compile(
r"([0-9a-fA-F ]+)\s*;\s*(%s)\s*#\s*(?:[^\s]+)\s+(.*)\s*"
% "|".join(_EMOJI_QUAL_TYPES)
)
result = []
GROUP_PREFIX = "# group: "
SUBGROUP_PREFIX = "# subgroup: "
group = None
subgroup = None
for line in data_string.splitlines():
line = line.strip()
if not line:
continue
if line[0] == "#":
if line.startswith(GROUP_PREFIX):
group = line[len(GROUP_PREFIX) :].strip().encode("ascii")
subgroup = None
elif line.startswith(SUBGROUP_PREFIX):
subgroup = line[len(SUBGROUP_PREFIX) :].strip().encode("ascii")
continue
m = line_re.match(line)
if not m:
raise ValueError('Did not match "%s" in emoji-test.txt' % line)
if m.group(2) not in ["component", "fully-qualified"]:
# We only want component and fully-qualified sequences, as those are
# 'canonical'. 'minimally-qualified' apparently just leave off the
# FEOF emoji presentation tag, we already assume these.
# Information for the unqualified sequences should be
# redundant. At the moment we don't verify this so if the file
# changes we won't catch that.
continue
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(3).strip()
if not (group and subgroup):
raise Exception(
"sequence %s missing group or subgroup" % seq_to_string(seq)
)
result.append((seq, group, subgroup, name))
return result
_SUPPLEMENTAL_EMOJI_GROUP_DATA = """
# group: Misc
# subgroup: used with keycaps
0023 fe0f ; fully-qualified # ? number sign
002a fe0f ; fully-qualified # ? asterisk
0030 fe0f ; fully-qualified # ? digit zero
0031 fe0f ; fully-qualified # ? digit one
0032 fe0f ; fully-qualified # ? digit two
0033 fe0f ; fully-qualified # ? digit three
0034 fe0f ; fully-qualified # ? digit four
0035 fe0f ; fully-qualified # ? digit five
0036 fe0f ; fully-qualified # ? digit six
0037 fe0f ; fully-qualified # ? digit seven
0038 fe0f ; fully-qualified # ? digit eight
0039 fe0f ; fully-qualified # ? digit nine
20e3 ; fully-qualified # ? combining enclosing keycap
# As of Unicode 11 these have group data defined.
# subgroup: skin-tone modifiers
#1f3fb ; fully-qualified # ? emoji modifier fitzpatrick type-1-2
#1f3fc ; fully-qualified # ? emoji modifier fitzpatrick type-3
#1f3fd ; fully-qualified # ? emoji modifier fitzpatrick type-4
#1f3fe ; fully-qualified # ? emoji modifier fitzpatrick type-5
#1f3ff ; fully-qualified # ? emoji modifier fitzpatrick type-6
# subgroup: regional indicator symbols
1f1e6 ; fully-qualified # ? regional indicator symbol letter A
1f1e7 ; fully-qualified # ? regional indicator symbol letter B
1f1e8 ; fully-qualified # ? regional indicator symbol letter C
1f1e9 ; fully-qualified # ? regional indicator symbol letter D
1f1ea ; fully-qualified # ? regional indicator symbol letter E
1f1eb ; fully-qualified # ? regional indicator symbol letter F
1f1ec ; fully-qualified # ? regional indicator symbol letter G
1f1ed ; fully-qualified # ? regional indicator symbol letter H
1f1ee ; fully-qualified # ? regional indicator symbol letter I
1f1ef ; fully-qualified # ? regional indicator symbol letter J
1f1f0 ; fully-qualified # ? regional indicator symbol letter K
1f1f1 ; fully-qualified # ? regional indicator symbol letter L
1f1f2 ; fully-qualified # ? regional indicator symbol letter M
1f1f3 ; fully-qualified # ? regional indicator symbol letter N
1f1f4 ; fully-qualified # ? regional indicator symbol letter O
1f1f5 ; fully-qualified # ? regional indicator symbol letter P
1f1f6 ; fully-qualified # ? regional indicator symbol letter Q
1f1f7 ; fully-qualified # ? regional indicator symbol letter R
1f1f8 ; fully-qualified # ? regional indicator symbol letter S
1f1f9 ; fully-qualified # ? regional indicator symbol letter T
1f1fa ; fully-qualified # ? regional indicator symbol letter U
1f1fb ; fully-qualified # ? regional indicator symbol letter V
1f1fc ; fully-qualified # ? regional indicator symbol letter W
1f1fd ; fully-qualified # ? regional indicator symbol letter X
1f1fe ; fully-qualified # ? regional indicator symbol letter Y
1f1ff ; fully-qualified # ? regional indicator symbol letter Z
#subgroup: unknown flag
fe82b ; fully-qualified # ? unknown flag PUA codepoint
"""
# These are skin tone sequences that Unicode decided not to define. Android
# shipped with them, so we're stuck with them forever regardless of what
# Unicode says.
#
# This data is in the format of emoji-sequences.txt and emoji-zwj-sequences.txt
_LEGACY_ANDROID_SEQUENCES = """
1F93C 1F3FB ; Emoji_Modifier_Sequence ; people wrestling: light skin tone # 9.0
1F93C 1F3FC ; Emoji_Modifier_Sequence ; people wrestling: medium-light skin tone # 9.0
1F93C 1F3FD ; Emoji_Modifier_Sequence ; people wrestling: medium skin tone # 9.0
1F93C 1F3FE ; Emoji_Modifier_Sequence ; people wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF ; Emoji_Modifier_Sequence ; people wrestling: dark skin tone # 9.0
1F93C 1F3FB 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: light skin tone # 9.0
1F93C 1F3FC 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium-light skin tone # 9.0
1F93C 1F3FD 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium skin tone # 9.0
1F93C 1F3FE 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: dark skin tone # 9.0
1F93C 1F3FB 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: light skin tone # 9.0
1F93C 1F3FC 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium-light skin tone # 9.0
1F93C 1F3FD 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium skin tone # 9.0
1F93C 1F3FE 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: dark skin tone # 9.0
"""
# Defines how to insert the new sequences into the standard order data. Would
# have been nice to merge it into the above legacy data but that would have
# required a format change.
_LEGACY_ANDROID_ORDER = """
-1F93C # people wrestling
1F93C 1F3FB
1F93C 1F3FC
1F93C 1F3FD
1F93C 1F3FE
1F93C 1F3FF
-1F93C 200D 2642 FE0F # men wrestling
1F93C 1F3FB 200D 2642 FE0F
1F93C 1F3FC 200D 2642 FE0F
1F93C 1F3FD 200D 2642 FE0F
1F93C 1F3FE 200D 2642 FE0F
1F93C 1F3FF 200D 2642 FE0F
-1F93C 200D 2640 FE0F # women wrestling
1F93C 1F3FB 200D 2640 FE0F
1F93C 1F3FC 200D 2640 FE0F
1F93C 1F3FD 200D 2640 FE0F
1F93C 1F3FE 200D 2640 FE0F
1F93C 1F3FF 200D 2640 FE0F
"""
def _get_order_patch(order_text, seq_to_name):
"""Create a mapping from a key sequence to a list of sequence, name tuples.
This will be used to insert additional sequences after the key sequence
in the order data. seq_to_name is a mapping from new sequence to name,
so the names don't have to be duplicated in the order data."""
patch_map = {}
patch_key = None
patch_list = None
def get_sequence(seqtext):
return tuple([int(s, 16) for s in seqtext.split()])
for line in order_text.splitlines():
ix = line.find("#")
if ix >= 0:
line = line[:ix]
line = line.strip()
if not line:
continue
if line.startswith("-"):
if patch_list and patch_key:
patch_map[patch_key] = patch_list
patch_key = get_sequence(line[1:])
patch_list = []
else:
seq = get_sequence(line)
name = seq_to_name[seq] # exception if seq is not in sequence_text
patch_list.append((seq, name))
if patch_list and patch_key:
patch_map[patch_key] = patch_list
return patch_map
def _get_android_order_patch():
"""Get an order patch using the legacy android data."""
# maps from sequence to (name, age, type), we only need the name
seq_data = _read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines())
seq_to_name = {k: v[0] for k, v in seq_data.items()}
return _get_order_patch(_LEGACY_ANDROID_ORDER, seq_to_name)
def _apply_order_patch(patch, group_list):
"""patch is a map from a key sequence to list of sequence, name pairs, and
group_list is an ordered list of sequence, group, subgroup, name tuples.
Iterate through the group list appending each item to a new list, and
after appending an item matching a key sequence, also append all of its
associated sequences in order using the same group and subgroup.
Return the new list. If there are any unused patches, raise an exception."""
result = []
patched = set()
for t in group_list:
result.append(t)
if t[0] in patch:
patched.add(t[0])
_, group, subgroup, _ = t
for seq, name in patch[t[0]]:
result.append((seq, group, subgroup, name))
unused = set(patch.keys()) - patched
if unused:
raise Exception(
"%d unused patch%s\n %s: "
% (
len(unused),
"" if len(unused) == 1 else "es",
"\n ".join(seq_to_string(seq) for seq in sorted(unused)),
)
)
return result
def _load_emoji_group_data():
global _emoji_group_data
if _emoji_group_data:
return
_emoji_group_data = {}
with open_unicode_data_file("emoji-test.txt") as f:
text = f.read()
group_list = _read_emoji_test_data(text)
# patch with android items
patch = _get_android_order_patch()
group_list = _apply_order_patch(patch, group_list)
group_list.extend(_read_emoji_test_data(_SUPPLEMENTAL_EMOJI_GROUP_DATA))
for i, (seq, group, subgroup, name) in enumerate(group_list):
if seq in _emoji_group_data:
print(
"seq %s already in group data as %s"
% (seq_to_string(seq), _emoji_group_data[seq])
)
print(" new value would be %s" % str((i, group, subgroup, name)))
_emoji_group_data[seq] = (i, group, subgroup, name)
assert len(group_list) == len(_emoji_group_data)
def get_emoji_group_data(seq):
"""Return group data for the canonical sequence seq, or None.
Group data is a tuple of index, group, subgroup, and name. The
index is a unique global sort index for the sequence among all
sequences in the group data."""
_load_emoji_group_data()
return _emoji_group_data.get(seq, None)
def get_emoji_groups():
"""Return the main emoji groups, in order."""
_load_emoji_group_data()
groups = []
group = None
for _, g, _, _ in sorted(_emoji_group_data.values()):
if g != group:
group = g
groups.append(group)
return groups
def get_emoji_subgroups(group):
"""Return the subgroups of this group, in order, or None
if the group is not recognized."""
_load_emoji_group_data()
subgroups = []
subgroup = None
for _, g, sg, _ in sorted(_emoji_group_data.values()):
if g == group:
if sg != subgroup:
subgroup = sg
subgroups.append(subgroup)
return subgroups if subgroups else None
def get_emoji_in_group(group, subgroup=None):
"""Return the sorted list of the emoji sequences in the group (limiting to
subgroup if subgroup is not None). Returns None if group does not
exist, and an empty list if subgroup does not exist in group."""
_load_emoji_group_data()
result = None
for seq, (index, g, sg, _) in _emoji_group_data.items():
if g == group:
if result is None:
result = []
if subgroup and sg != subgroup:
continue
result.append(seq)
result.sort(key=lambda s: _emoji_group_data[s][0])
return result
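# Illustrative sketch (editor's addition): walking the group data from groups
# to subgroups to the individual sequences they contain.
def _example_emoji_group_walk():
    names = []
    for group in get_emoji_groups():
        for subgroup in get_emoji_subgroups(group):
            for seq in get_emoji_in_group(group, subgroup):
                # Group data is (index, group, subgroup, name).
                names.append(get_emoji_group_data(seq)[3])
    return names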
def get_sorted_emoji_sequences(seqs):
"""Seqs is a collection of canonical emoji sequences. Returns a list of
these sequences in the canonical emoji group order. Sequences that are not
canonical are placed at the end, in unicode code point order.
"""
_load_emoji_group_data()
    return sorted(
        seqs,
        key=lambda s: (_emoji_group_data[s][0] if s in _emoji_group_data else 100000, s),
    )
def _load_emoji_sequence_data():
"""Ensure the emoji sequence data is initialized."""
global _emoji_sequence_data, _emoji_non_vs_to_canonical
if _emoji_sequence_data is not None:
return
_emoji_sequence_data = {}
_emoji_non_vs_to_canonical = {}
def add_data(data):
for k, t in data.items():
if k in _emoji_sequence_data:
print("already have data for sequence:", seq_to_string(k), t)
_emoji_sequence_data[k] = t
if EMOJI_VS in k:
_emoji_non_vs_to_canonical[strip_emoji_vs(k)] = k
for datafile in ["emoji-zwj-sequences.txt", "emoji-sequences.txt"]:
add_data(_read_emoji_data_file(datafile))
add_data(_read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines()))
_load_unicode_data_txt() # ensure character_names_data is populated
_load_emoji_data() # ensure presentation_default_text is populated
_load_emoji_group_data() # ensure group data is populated
# Get names for single emoji from the test data. We will prefer these over
# those in UnicodeData (e.g. prefer "one o'clock" to "clock face one oclock"),
# and if they're not in UnicodeData these are proposed new emoji.
for seq, (_, _, _, emoji_name) in _emoji_group_data.items():
non_vs_seq = strip_emoji_vs(seq)
if len(non_vs_seq) > 1:
continue
cp = non_vs_seq[0]
# If it's not in character names data, it's a proposed emoji.
if cp not in _character_names_data:
            # Use 'ignore' to strip curly quotes etc. if they exist; Unicode
            # character names are ASCII, and it's probably best to keep it that way.
cp_name = emoji_name.encode("ascii", "ignore").upper()
_character_names_data[cp] = cp_name
is_default_text_presentation = cp in _presentation_default_text
if is_default_text_presentation:
seq = (cp, EMOJI_VS)
        emoji_age = age(cp)
        if emoji_age is not None:
            emoji_age = float(emoji_age)
        else:
            emoji_age = PROPOSED_EMOJI_AGE
current_data = _emoji_sequence_data.get(seq) or (
emoji_name,
emoji_age,
"Emoji_Single_Sequence",
)
if is_default_text_presentation:
emoji_name = "(emoji) " + emoji_name
_emoji_sequence_data[seq] = (emoji_name, current_data[1], current_data[2])
# Fill in sequences of single emoji, handling non-canonical to canonical also.
for k in _emoji:
non_vs_seq = (k,)
is_default_text_presentation = k in _presentation_default_text
if is_default_text_presentation:
canonical_seq = (k, EMOJI_VS)
_emoji_non_vs_to_canonical[non_vs_seq] = canonical_seq
else:
canonical_seq = non_vs_seq
if canonical_seq in _emoji_sequence_data:
# Prefer names we have where they exist
emoji_name, emoji_age, seq_type = _emoji_sequence_data[canonical_seq]
else:
emoji_name = name(k, "unnamed").lower()
if name == "unnamed":
continue
            emoji_age = age(k)
            emoji_age = (
                float(emoji_age) if emoji_age is not None else PROPOSED_EMOJI_AGE
            )
seq_type = "Emoji_Single_Sequence"
if is_default_text_presentation and not emoji_name.startswith("(emoji) "):
emoji_name = "(emoji) " + emoji_name
_emoji_sequence_data[canonical_seq] = (emoji_name, emoji_age, seq_type)
def get_emoji_sequences(age=None, types=None):
"""Return the set of canonical emoji sequences, filtering to those <= age
if age is not None, and those with type in types (if not a string) or
type == types (if type is a string) if types is not None. By default
all sequences are returned, including those for single emoji."""
_load_emoji_sequence_data()
result = _emoji_sequence_data.keys()
if types is not None:
if isinstance(types, basestring):
types = frozenset([types])
        result = [k for k in result if _emoji_sequence_data[k][2] in types]
if age is not None:
age = float(age)
        result = [k for k in result if _emoji_sequence_data[k][1] <= age]
return result
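# Illustrative sketch (editor's addition): filtering the canonical sequences by
# age keeps only those introduced at or before the given Unicode version.
def _example_emoji_sequences_by_age():
    all_seqs = get_emoji_sequences()
    old_seqs = get_emoji_sequences(age=9.0)
    return len(all_seqs), len(old_seqs)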
def get_emoji_sequence_data(seq):
"""Return a tuple of the name, age, and type for the (possibly non-canonical)
sequence, or None if not recognized as a sequence."""
_load_emoji_sequence_data()
seq = get_canonical_emoji_sequence(seq)
if not seq or seq not in _emoji_sequence_data:
return None
return _emoji_sequence_data[seq]
def get_emoji_sequence_name(seq):
"""Return the name of the (possibly non-canonical) sequence, or None if
not recognized as a sequence."""
data = get_emoji_sequence_data(seq)
return None if not data else data[0]
def get_emoji_sequence_age(seq):
"""Return the age of the (possibly non-canonical) sequence, or None if
not recognized as a sequence. Proposed sequences have PROPOSED_EMOJI_AGE
as the age."""
# floats are a pain since the actual values are decimal. maybe use
# strings to represent age.
data = get_emoji_sequence_data(seq)
return None if not data else data[1]
def get_emoji_sequence_type(seq):
"""Return the type of the (possibly non-canonical) sequence, or None if
not recognized as a sequence. Types are in EMOJI_SEQUENCE_TYPES."""
data = get_emoji_sequence_data(seq)
return None if not data else data[2]
def is_canonical_emoji_sequence(seq):
"""Return true if this is a canonical emoji sequence (has 'vs' where Unicode
says it should), and is known."""
_load_emoji_sequence_data()
return seq in _emoji_sequence_data
def get_canonical_emoji_sequence(seq):
"""Return the canonical version of this emoji sequence if the sequence is
known, or None."""
if is_canonical_emoji_sequence(seq):
return seq
seq = strip_emoji_vs(seq)
return _emoji_non_vs_to_canonical.get(seq, None)
def strip_emoji_vs(seq):
"""Return a version of this emoji sequence with emoji variation selectors
stripped. This is the 'non-canonical' version used by the color emoji font,
which doesn't care how the sequence is represented in text."""
if EMOJI_VS in seq:
return tuple([cp for cp in seq if cp != EMOJI_VS])
return seq
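# Illustrative example (not part of the original module): a keycap-style
# sequence carrying the emoji variation selector reduces to its base
# codepoints.
assert strip_emoji_vs((0x0023, EMOJI_VS, 0x20E3)) == (0x0023, 0x20E3)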
def seq_to_string(seq):
"""Return a string representation of the codepoint sequence."""
return "_".join("%04x" % cp for cp in seq)
def string_to_seq(seq_str):
"""Return a codepoint sequence (tuple) given its string representation."""
return tuple([int(s, 16) for s in seq_str.split("_")])
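# Illustrative example (not part of the original module): sequences round-trip
# through the underscore-joined, zero-padded lowercase hex representation.
assert seq_to_string((0x1F1FA, 0x1F1F8)) == "1f1fa_1f1f8"
assert string_to_seq("1f1fa_1f1f8") == (0x1F1FA, 0x1F1F8)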
def is_cp_seq(seq):
return all(0 <= n <= 0x10FFFF for n in seq)
_REGIONAL_INDICATOR_START = 0x1F1E6
_REGIONAL_INDICATOR_END = 0x1F1FF
def is_regional_indicator(cp):
return _REGIONAL_INDICATOR_START <= cp <= _REGIONAL_INDICATOR_END
def is_regional_indicator_seq(cps):
return len(cps) == 2 and all(is_regional_indicator(cp) for cp in cps)
def regional_indicator_to_ascii(cp):
assert is_regional_indicator(cp)
return chr(cp - _REGIONAL_INDICATOR_START + ord("A"))
def ascii_to_regional_indicator(ch):
assert "A" <= ch <= "Z"
return ord(ch) - ord("A") + _REGIONAL_INDICATOR_START
def string_to_regional_indicator_seq(s):
assert len(s) == 2
return ascii_to_regional_indicator(s[0]), ascii_to_regional_indicator(s[1])
def regional_indicator_seq_to_string(cps):
assert len(cps) == 2
return "".join(regional_indicator_to_ascii(cp) for cp in cps)
def is_tag(cp):
return 0xE0020 < cp < 0xE0080 or cp == 0xE0001
def tag_character_to_ascii(cp):
assert is_tag(cp)
if cp == 0xE0001:
return "[begin]"
if cp == 0xE007F:
return "[end]"
return chr(cp - 0xE0000)
def is_regional_tag_seq(seq):
return (
seq[0] == 0x1F3F4
and seq[-1] == 0xE007F
and all(0xE0020 < cp < 0xE007E for cp in seq[1:-1])
)
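# Illustrative example (not part of the original module): the "England" flag
# is the black-flag base, tag letters spelling "gbeng", and the cancel tag.
_ENGLAND_FLAG_EXAMPLE = (0x1F3F4, 0xE0067, 0xE0062, 0xE0065, 0xE006E, 0xE0067, 0xE007F)
assert is_regional_tag_seq(_ENGLAND_FLAG_EXAMPLE)
assert "".join(tag_character_to_ascii(cp) for cp in _ENGLAND_FLAG_EXAMPLE[1:-1]) == "gbeng"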
_FITZ_START = 0x1F3FB
_FITZ_END = 0x1F3FF
def is_skintone_modifier(cp):
return _FITZ_START <= cp <= _FITZ_END
def get_presentation_default_emoji():
_load_emoji_data()
return _presentation_default_emoji
def get_presentation_default_text():
_load_emoji_data()
return _presentation_default_text
def get_emoji():
_load_emoji_data()
return _emoji
def is_emoji(cp):
_load_emoji_data()
return cp in _emoji
def is_emoji_modifier_base(cp):
_load_emoji_data()
return cp in _emoji_modifier_base
def _load_unicode_emoji_variants():
"""Parse StandardizedVariants.txt and initialize a set of characters
that have a defined emoji variant presentation. All such characters
also have a text variant presentation so a single set works for both."""
global _emoji_variants, _emoji_variants_proposed
if _emoji_variants:
return
emoji_variants = set()
# prior to Unicode 11 emoji variants were part of the standard data.
# as of Unicode 11 however they're only in a separate emoji data file.
line_re = re.compile(r"([0-9A-F]{4,6})\s+FE0F\s*;\s*emoji style\s*;")
with open_unicode_data_file("emoji-variation-sequences.txt") as f:
for line in f:
m = line_re.match(line)
if m:
emoji_variants.add(int(m.group(1), 16))
_emoji_variants = frozenset(emoji_variants)
try:
read = 0
skipped = 0
with open_unicode_data_file("proposed-variants.txt") as f:
for line in f:
m = line_re.match(line)
if m:
read += 1
cp = int(m.group(1), 16)
if cp in emoji_variants:
skipped += 1
else:
emoji_variants.add(cp)
print(
"skipped %s %d proposed variants"
% ("all of" if skipped == read else skipped, read)
)
except IOError as e:
if e.errno != 2:
raise
_emoji_variants_proposed = frozenset(emoji_variants)
def get_unicode_emoji_variants(include_proposed="proposed"):
"""Returns the emoji characters that have both emoji and text presentations.
If include_proposed is 'proposed', include the ones proposed in 2016/08. If
include_proposed is 'proposed_extra', also include the emoji Noto proposes
for text presentation treatment to align related characters. Else
include_proposed should resolve to boolean False."""
_load_unicode_emoji_variants()
if not include_proposed:
return _emoji_variants
elif include_proposed == "proposed":
return _emoji_variants_proposed
elif include_proposed == "proposed_extra":
extra = tool_utils.parse_int_ranges("1f4b9 1f4c8-1f4ca 1f507 1f509-1f50a 1f44c")
return _emoji_variants_proposed | extra
else:
raise Exception(
"include_proposed is %s which is not in ['proposed', 'proposed_extra']"
% include_proposed
)
def _load_variant_data():
"""Parse StandardizedVariants.txt and initialize all non-emoji variant
data. The data is a mapping from codepoint to a list of tuples of:
- variant selector
- compatibility character (-1 if there is none)
- shaping context (bitmask, 1 2 4 8 for isolate initial medial final)
The compatibility character is for cjk mappings that map to 'the same'
glyph as another CJK character."""
global _variant_data, _variant_data_cps
if _variant_data:
return
compatibility_re = re.compile(r"\s*CJK COMPATIBILITY IDEOGRAPH-([0-9A-Fa-f]+)")
variants = collections.defaultdict(list)
with open_unicode_data_file("StandardizedVariants.txt") as f:
for line in f:
x = line.find("#")
if x >= 0:
line = line[:x]
line = line.strip()
if not line:
continue
tokens = line.split(";")
cp, var = tokens[0].split(" ")
cp = int(cp, 16)
varval = int(var, 16)
if varval in [0xFE0E, 0xFE0F]:
continue # ignore emoji variants
m = compatibility_re.match(tokens[1].strip())
compat = int(m.group(1), 16) if m else -1
context = 0
if tokens[2]:
ctx = tokens[2]
if ctx.find("isolate") != -1:
context += 1
if ctx.find("initial") != -1:
context += 2
if ctx.find("medial") != -1:
context += 4
if ctx.find("final") != -1:
context += 8
variants[cp].append((varval, compat, context))
_variant_data_cps = frozenset(variants.keys())
_variant_data = variants
def has_variant_data(cp):
_load_variant_data()
return cp in _variant_data
def get_variant_data(cp):
_load_variant_data()
return _variant_data[cp][:] if cp in _variant_data else None
def variant_data_cps():
_load_variant_data()
return _variant_data_cps
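# Illustrative helper (not part of the original module; the name is
# hypothetical): decode the shaping-context bitmask documented in
# _load_variant_data, where 1/2/4/8 mean isolate/initial/medial/final.
def _shaping_context_names(context):
    names = ("isolate", "initial", "medial", "final")
    return [n for i, n in enumerate(names) if context & (1 << i)]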
# proposed emoji
def _load_proposed_emoji_data():
"""Parse proposed-emoji.txt if it exists to get cps/names of proposed emoji
(but not approved) for this version of Unicode."""
global _proposed_emoji_data, _proposed_emoji_data_cps
if _proposed_emoji_data:
return
_proposed_emoji_data = {}
line_re = re.compile(r"^U\+([a-zA-z0-9]{4,5})\s.*\s\d{4}Q\d\s+(.*)$")
try:
with open_unicode_data_file("proposed-emoji.txt") as f:
for line in f:
line = line.strip()
if not line or line[0] == "#" or line.startswith(u"\u2022"):
continue
m = line_re.match(line)
if not m:
raise ValueError('did not match "%s"' % line)
cp = int(m.group(1), 16)
name = m.group(2)
if cp in _proposed_emoji_data:
raise ValueError(
"duplicate emoji %x, old name: %s, new name: %s"
% (cp, _proposed_emoji_data[cp], name)
)
_proposed_emoji_data[cp] = name
except IOError as e:
if e.errno != 2:
# not file not found, rethrow
raise
_proposed_emoji_data_cps = frozenset(_proposed_emoji_data.keys())
def proposed_emoji_name(cp):
_load_proposed_emoji_data()
return _proposed_emoji_data.get(cp, "")
def proposed_emoji_cps():
_load_proposed_emoji_data()
return _proposed_emoji_data_cps
def is_proposed_emoji(cp):
_load_proposed_emoji_data()
return cp in _proposed_emoji_data_cps
def read_codeset(text):
line_re = re.compile(r"^0x([0-9a-fA-F]{2,6})\s+0x([0-9a-fA-F]{4,6})\s+.*")
codeset = set()
for line in text.splitlines():
m = line_re.match(line)
if m:
cp = int(m.group(2), 16)
codeset.add(cp)
return codeset
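# Illustrative example (not part of the original module): each mapping line in
# a code page file pairs a byte value with a Unicode codepoint; only the
# second (Unicode) column is collected.
assert read_codeset("0x41\t0x0041\t#  LATIN CAPITAL LETTER A") == {0x41}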
def codeset(cpname):
"""Return a set of the unicode codepoints in the code page named cpname, or
None."""
filename = ("%s.txt" % cpname).upper()
filepath = path.join(
path.dirname(__file__), os.pardir, "third_party", "unicode", filename
)
if not path.isfile(filepath):
return None
with open(filepath, "r") as f:
return read_codeset(f.read())
def _dump_emoji_presentation():
"""Dump presentation info, for testing."""
text_p = 0
emoji_p = 0
for cp in sorted(get_emoji()):
cp_name = name(cp, "<error>")
if cp in get_presentation_default_emoji():
presentation = "emoji"
emoji_p += 1
elif cp in get_presentation_default_text():
presentation = "text"
text_p += 1
else:
presentation = "<error>"
print(
"%s%04x %5s %s" % (" " if cp < 0x10000 else "", cp, presentation, cp_name)
)
print(
"%d total emoji, %d text presentation, %d emoji presentation"
% (len(get_emoji()), text_p, emoji_p)
)
def _load_nameslist_data():
global _nameslist_see_also
if _nameslist_see_also is not None:
return
_nameslist_see_also = collections.defaultdict(set)
cp = None
line_re = re.compile(r"^(?:(?:([0-9A-F]{4,6})\t.*)|(?:^\s+([x=])\s+(.*)))$")
see_also_re = re.compile(r"\s*(?:\(.*\s-\s+([0-9A-F]{4,6})\))|([0-9A-F]{4,6})")
with open_unicode_data_file("NamesList.txt") as f:
for line in f:
m = line_re.match(line)
if not m:
continue
if m.group(1):
cp = int(m.group(1), 16)
else:
rel = m.group(2).strip()
val = m.group(3).strip()
if rel != "x":
continue
m = see_also_re.match(val)
if not m:
raise Exception(
'could not match see also val "%s" in line "%s"' % (val, line)
)
ref_cp = int(m.group(1) or m.group(2), 16)
_nameslist_see_also[cp].add(ref_cp)
def see_also(cp):
_load_nameslist_data()
return frozenset(_nameslist_see_also.get(cp, ()))
def _load_namealiases_data():
global _namealiases_alt_names
if _namealiases_alt_names is not None:
return
_namealiases_alt_names = collections.defaultdict(list)
line_re = re.compile(r"([0-9A-F]{4,6});([^;]+);(.*)$")
with open_unicode_data_file("NameAliases.txt") as f:
for line in f:
m = line_re.match(line)
if not m:
continue
cp = int(m.group(1), 16)
name = m.group(2).strip()
name_type = m.group(3).strip()
if not name_type in [
"correction",
"control",
"alternate",
"figment",
"abbreviation",
]:
raise Exception('unknown name type in "%s"' % line)
if name_type == "figment":
continue
_namealiases_alt_names[cp].append((name, name_type))
def alt_names(cp):
    """Return a tuple of (name, nametype) pairs for cp, or None."""
    _load_namealiases_data()
    names = _namealiases_alt_names.get(cp)
    return tuple(names) if names else None
if __name__ == "__main__":
all_sequences = sorted(get_emoji_sequences())
for k in all_sequences:
if not get_emoji_group_data(k):
print("no data:", seq_to_string(k))
for group in get_emoji_groups():
print("group:", group)
for subgroup in get_emoji_subgroups(group):
print(" subgroup:", subgroup)
print(" %d items" % len(get_emoji_in_group(group, subgroup)))
# dump some information for annotations
for k in get_sorted_emoji_sequences(all_sequences):
age = get_emoji_sequence_age(k)
if age == 12:
print(seq_to_string(k).replace("_", " "), "#", get_emoji_sequence_name(k))
|
googlefonts/nototools
|
nototools/unicode_data.py
|
Python
|
apache-2.0
| 57,418
| 0.000871
|
import unittest
import os
import numpy as np
import math
from tables import IsDescription, Int32Col, Float32Col
from pymicro.core.samples import SampleData
from BasicTools.Containers.ConstantRectilinearMesh import ConstantRectilinearMesh
import BasicTools.Containers.UnstructuredMeshCreationTools as UMCT
from config import PYMICRO_EXAMPLES_DATA_DIR
class TestGrainData(IsDescription):
"""
Description class specifying structured storage for tests
"""
idnumber = Int32Col() # Signed 64-bit integer
volume = Float32Col() # float
center = Float32Col(shape=(3,)) # float
class TestDerivedClass(SampleData):
""" Class to test the datamodel specification mechanism, via definition
of classes derived from SampleData
"""
def minimal_data_model(self):
"""
Specify the minimal contents of the hdf5 (Group names, paths, and
group types) in the form of a dictionary {content:Location}
Extends SampleData Class _minimal_data_model class
"""
# create a dtype to create a structured array
Descr = np.dtype([('density', np.float32), ('melting_Pt', np.float32),
('Chemical_comp', 'S', 30)])
# create data model description dictionaries
minimal_content_index_dic = {'Image_data': '/CellData',
'grain_map': '/CellData/grain_map',
'Grain_data': '/GrainData',
'GrainDataTable': ('/GrainData/'
'GrainDataTable'),
'Crystal_data': '/CrystalStructure',
'lattice_params': ('/CrystalStructure'
'/LatticeParameters'),
'lattice_props': ('/CrystalStructure'
'/LatticeProps'),
'grain_names': '/GrainData/GrainNames',
'Mesh_data': '/MeshData'}
minimal_content_type_dic = {'Image_data': '3DImage',
'grain_map': 'field_array',
'Grain_data': 'Group',
'GrainDataTable': TestGrainData,
'Crystal_data': 'Group',
'lattice_params': 'data_array',
'lattice_props': Descr,
'grain_names': 'string_array',
'Mesh_data': 'Mesh'
}
return minimal_content_index_dic, minimal_content_type_dic
class SampleDataTests(unittest.TestCase):
def setUp(self):
print('testing the SampleData class')
# Create data to store into SampleData instances
# dataset sample_name and description
self.sample_name = 'test_sample'
self.sample_description = """
This is a test dataset created by the SampleData class unit tests.
"""
# Create a mesh of an octahedron with 6 triangles
self.mesh_nodes = np.array([[-1., -1., 0.],
[-1., 1., 0.],
[1., 1., 0.],
[1., -1., 0.],
[0., 0., 1.],
[0., 0., -1.]])
self.mesh_elements = np.array([[0, 1, 4],
[0, 1, 5],
[1, 2, 4],
[1, 2, 5],
[2, 3, 4],
[2, 3, 5],
[3, 0, 4],
[3, 0, 5]])
# Create 2 fields 'shape functions' for the 2 nodes at z=+/-1
self.mesh_shape_f1 = np.array([0., 0., 0., 0., 1., 0.])
self.mesh_shape_f2 = np.array([0., 0., 0., 0., 0., 1.])
# Create 2 element wise fields
self.mesh_el_Id = np.array([0., 1., 2., 3., 4., 5., 6., 7.])
self.mesh_alternated = np.array([1., 1., -1., -1., 1., 1., -1., -1.])
# Create a binary 3D Image
self.image = np.zeros((10, 10, 10), dtype='int16')
self.image[:, :, :5] = 1
self.image_origin = np.array([-1., -1., -1.])
self.image_voxel_size = np.array([0.2, 0.2, 0.2])
# Create a data array
self.data_array = np.array([math.tan(x) for x in
np.linspace(-math.pi/4, math.pi/4, 51)])
# Create numpy dtype and structure array
# WARNING: Pytables transforms all strings into bytes
# --> use only bytes in dtypes
self.dtype1 = np.dtype([('density', np.float32),
('melting_Pt', np.float32),
('Chemical_comp', 'S', 30)])
self.struct_array1 = np.array([(6.0, 1232, 'Cu2O'),
(5.85, 2608, 'ZrO2')],
dtype=self.dtype1)
# Test file pathes
self.filename = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'test_sampledata')
self.derived_filename = self.filename+'_derived'
self.reference_file = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'test_sampledata_ref')
def test_create_sample(self):
"""Test creation of a SampleData instance/file and data storage."""
sample = SampleData(filename=self.filename,
overwrite_hdf5=True, verbose=False,
sample_name=self.sample_name,
sample_description=self.sample_description)
self.assertTrue(os.path.exists(self.filename + '.h5'))
self.assertTrue(os.path.exists(self.filename + '.xdmf'))
self.assertEqual(sample.get_sample_name(), self.sample_name)
self.assertEqual(sample.get_description(), self.sample_description)
# Add mesh data into SampleData dataset
mesh = UMCT.CreateMeshOfTriangles(self.mesh_nodes, self.mesh_elements)
# Add mesh node tags
mesh.nodesTags.CreateTag('Z0_plane', False).SetIds([0, 1, 2, 3])
mesh.nodesTags.CreateTag('out_of_plane', False).SetIds([4, 5])
# Add element tags
mesh.GetElementsOfType('tri3').GetTag('Top').SetIds([0, 2, 4, 6])
mesh.GetElementsOfType('tri3').GetTag('Bottom').SetIds([1, 3, 5, 7])
# Add mesh node fields
mesh.nodeFields['Test_field1'] = self.mesh_shape_f1
mesh.nodeFields['Test_field2'] = self.mesh_shape_f2
# Add mesh element fields
mesh.elemFields['Test_field3'] = self.mesh_el_Id
mesh.elemFields['Test_field4'] = self.mesh_alternated
sample.add_mesh(mesh, meshname='test_mesh', indexname='mesh',
location='/', bin_fields_from_sets=True)
# Add image data into SampleData dataset
image = ConstantRectilinearMesh(dim=len(self.image.shape))
image.SetDimensions(self.image.shape)
image.SetOrigin(self.image_origin)
image.SetSpacing(self.image_voxel_size)
image.elemFields['test_image_field'] = self.image
sample.add_image(image, imagename='test_image', indexname='image',
location='/')
# Add new group and array to SampleData dataset
sample.add_group(groupname='test_group', location='/', indexname='group')
sample.add_data_array(location='group', name='test_array',
array=self.data_array, indexname='array')
# close sample data instance
del sample
# reopen sample data instance
sample = SampleData(filename=self.filename)
# test mesh geometry data recovery
mesh_nodes = sample.get_mesh_nodes(meshname='mesh', as_numpy=True)
self.assertTrue(np.all(mesh_nodes == self.mesh_nodes))
mesh_elements = sample.get_mesh_xdmf_connectivity(meshname='mesh',
as_numpy=True)
mesh_elements = mesh_elements.reshape(self.mesh_elements.shape)
self.assertTrue(np.all(mesh_elements == self.mesh_elements))
# test mesh field recovery
shape_f1 = sample.get_field('Test_field1')
self.assertTrue(np.all(shape_f1 == self.mesh_shape_f1))
# test image field recovery and dictionary like access
image_field = sample['test_image_field']
self.assertTrue(np.all(image_field == self.image))
# test data array recovery and attribute like access
array = sample.test_array
self.assertTrue(np.all(array == self.data_array))
# test sampledata instance and file autodelete function
sample.autodelete = True
del sample
self.assertTrue(not os.path.exists(self.filename+'.h5'))
self.assertTrue(not os.path.exists(self.filename+'.xdmf'))
def test_copy_and_compress(self):
""" Copy the reference dataset and compress it """
sample = SampleData.copy_sample(src_sample_file=self.reference_file,
dst_sample_file=self.filename,
overwrite=True, get_object=True,
autodelete=True)
# get filesizes
original_filesize, _ = sample.get_file_disk_size(print_flag=False,
convert=False)
original_size, _ = sample.get_node_disk_size('test_image_field',
print_flag=False,
convert=False)
# Verify data content
data_array = sample.get_node('test_array')
self.assertTrue(np.all(self.data_array == data_array))
# compress image data
c_opt = {'complib': 'zlib', 'complevel': 1}
sample.set_chunkshape_and_compression(nodename='test_image_field',
compression_options=c_opt)
# assert that node size is smaller after compression
new_size, _ = sample.get_node_disk_size('test_image_field',
print_flag=False,
convert=False)
new_filesize, _ = sample.get_file_disk_size(print_flag=False,
convert=False)
# repack file and assert file size is lower than original filesize
sample.repack_h5file()
new_filesize, _ = sample.get_file_disk_size(print_flag=False,
convert=False)
self.assertGreater(original_filesize, new_filesize)
# delete SampleData instance and assert files deletion
del sample
self.assertTrue(not os.path.exists(self.filename + '.h5'))
self.assertTrue(not os.path.exists(self.filename + '.xdmf'))
def test_derived_class(self):
""" Test application specific data model specification through
derived classes.
Also test table functionalities.
"""
derived_sample = TestDerivedClass(filename=self.derived_filename,
autodelete=False,
overwrite_hdf5=True, verbose=False)
# assert data model Nodes are contained in dataset
self.assertTrue(derived_sample.__contains__('Image_data'))
self.assertTrue(derived_sample.__contains__('grain_map'))
self.assertTrue(derived_sample.__contains__('Grain_data'))
self.assertTrue(derived_sample.__contains__('GrainDataTable'))
self.assertTrue(derived_sample.__contains__('Crystal_data'))
self.assertTrue(derived_sample.__contains__('lattice_params'))
self.assertTrue(derived_sample.__contains__('lattice_props'))
self.assertTrue(derived_sample.__contains__('grain_names'))
self.assertTrue(derived_sample.__contains__('Mesh_data'))
# # assert data items created are empty, except for Groups
self.assertTrue(derived_sample._is_empty('Image_data'))
self.assertTrue(derived_sample._is_empty('grain_map'))
self.assertTrue(derived_sample._is_empty('GrainDataTable'))
self.assertTrue(derived_sample._is_empty('lattice_params'))
self.assertTrue(derived_sample._is_empty('lattice_props'))
self.assertTrue(derived_sample._is_empty('grain_names'))
self.assertTrue(derived_sample._is_empty('Mesh_data'))
# get table node and assert description
descr = derived_sample.get_table_description('GrainDataTable')
self.assertEqual(TestGrainData.columns,
descr._v_colobjects)
# add columns to the table
dtype = np.dtype([('name', np.str_, 16), ('floats', np.float64, (2,))])
derived_sample.add_tablecols('GrainDataTable', description=dtype)
tab = derived_sample.get_node('GrainDataTable')
self.assertTrue('name' in tab.colnames)
self.assertTrue('floats' in tab.colnames)
# append other table with numpy array and verify it is no more empty
derived_sample.append_table('lattice_props', self.struct_array1)
self.assertFalse(derived_sample._is_empty('lattice_props'))
# append string array and verify that it is not empty
derived_sample.append_string_array('grain_names',
['grain_1', 'grain_2', 'grain_3'])
self.assertFalse(derived_sample.get_attribute('empty', 'grain_names'))
del derived_sample
# reopen file and check that the new columns have been added
derived_sample = TestDerivedClass(
filename=self.derived_filename, autodelete=True,
overwrite_hdf5=False, verbose=False)
derived_sample.print_node_info('GrainDataTable')
tab = derived_sample.get_node('GrainDataTable')
self.assertTrue('name' in tab.colnames)
self.assertTrue('floats' in tab.colnames)
# check other table values
props = derived_sample['lattice_props']
self.assertTrue(np.all(props == self.struct_array1))
# check string array values
name1 = derived_sample['grain_names'][0].decode('utf-8')
name2 = derived_sample['grain_names'][1].decode('utf-8')
self.assertEqual(name1, 'grain_1')
self.assertEqual(name2, 'grain_2')
del derived_sample
self.assertTrue(not os.path.exists(self.derived_filename + '.h5'))
self.assertTrue(not os.path.exists(self.derived_filename + '.xdmf'))
def test_BasicTools_binding(self):
"""Test BasicTools to SampleData to BasicTools."""
# create mesh of triangles
myMesh = UMCT.CreateSquare(dimensions=[3, 3], ofTris=True)
# get into a SampleData instance
sample = SampleData(filename='square', verbose=False, autodelete=True)
sample.add_mesh(mesh_object=myMesh, meshname='BT_mesh', indexname='BTM',
replace=True, bin_fields_from_sets=False)
# get mesh object from SampleData file/instance
myMesh2 = sample.get_mesh('BTM')
# delete SampleData object and test values
self.assertTrue(np.all(myMesh.nodes == myMesh2.nodes))
# assert bulk element connectivity
connectivity = myMesh.elements['tri3'].connectivity
connectivity2 = myMesh2.elements['tri3'].connectivity
self.assertTrue(np.all(connectivity == connectivity2))
# assert boundary element connectivity
connectivity = myMesh.elements['bar2'].connectivity
connectivity2 = myMesh2.elements['bar2'].connectivity
self.assertTrue(np.all(connectivity == connectivity2))
# assert boundary element tags values
elements_in_tag = myMesh.GetElementsInTag('ExteriorSurf')
elements_in_tag2 = myMesh2.GetElementsInTag('ExteriorSurf')
self.assertTrue(np.all(elements_in_tag == elements_in_tag2))
# assert bulk element tags values
elements_in_tag = myMesh.GetElementsInTag('2D')
elements_in_tag2 = myMesh2.GetElementsInTag('2D')
self.assertTrue(np.all(elements_in_tag == elements_in_tag2))
del sample
def test_meshfile_formats(self):
# TODO: add more mesh formats to load in this test
from config import PYMICRO_EXAMPLES_DATA_DIR
sample = SampleData(filename='tmp_meshfiles_dataset',
overwrite_hdf5=True, autodelete=True)
meshfile_name = os.path.join(PYMICRO_EXAMPLES_DATA_DIR,
'cube_ref.geof')
sample.add_mesh(file=meshfile_name, meshname='geof_mesh',
indexname='mesh', bin_fields_from_sets=True)
# check the number of elements of the mesh
n_elems = sample.get_attribute('Number_of_elements', 'mesh')
self.assertTrue(np.all(n_elems == [384, 384]))
# check the element types in the mesh
el_type = sample.get_attribute('element_type', 'mesh')
self.assertEqual(el_type[0], 'tet4')
self.assertEqual(el_type[1], 'tri3')
del sample
def test_mesh_from_image(self):
"""Test BasicTools to SDimage to SDmesh."""
# 3D image parameters
dimensions = [11, 11, 11]
origin = [0., 0., 0.]
spacing = [1., 1., 1.]
# create BasicTools image object
myMesh = ConstantRectilinearMesh(dim=3)
myMesh.SetDimensions(dimensions)
myMesh.SetOrigin(origin)
myMesh.SetSpacing(spacing)
# create data field
data = np.zeros(shape=dimensions)
data[:, 3:8, 3:8] = 1
myMesh.nodeFields['test_field'] = data
# create SD instance and image group
sample = SampleData(filename='cube', verbose=False, autodelete=True)
sample.add_image(image_object=myMesh, imagename='Image_3D',
indexname='Im3D', replace=True)
# create mesh group of tetra from image group
sample.add_mesh_from_image('Im3D', with_fields=True, ofTetras=True,
meshname='Tetra_mesh',
indexname='Tmsh', replace=True)
self.assertTrue(sample.__contains__('Im3D'))
self.assertTrue(sample.__contains__('Tmsh'))
field1 = sample.get_node('test_field', as_numpy=True)
self.assertEqual(field1.shape, (11, 11, 11))
field2 = sample.get_node('Tmsh_test_field_Tetra_mesh',
as_numpy=True)
self.assertEqual(field2.shape, (11 * 11 * 11,))
self.assertEqual(field1.ravel()[37], field2.ravel()[37])
del sample
|
heprom/pymicro
|
pymicro/core/tests/test_samples.py
|
Python
|
mit
| 19,023
| 0.00021
|
import json
import logging
import httplib
import urllib2
from django.core.exceptions import ValidationError
from django.conf import settings
siaUrl=settings.SIA_URL
import re
import string
def sanitize_search_term(term):
# Replace all puncuation with spaces.
allowed_punctuation = set(['&', '|', '"', "'"])
all_punctuation = set(string.punctuation)
punctuation = "".join(all_punctuation - allowed_punctuation)
term = re.sub(r"[{}]+".format(re.escape(punctuation)), " ", \
term)
# Substitute all double quotes to single quotes.
term = term.replace('"', "'")
term = re.sub(r"[']+", "'", term)
# Create regex to find strings within quotes.
quoted_strings_re = re.compile(r"('[^']*')")
space_between_words_re = re.compile(r'([^ &|])[ ]+([^ &|])')
spaces_surrounding_letter_re = re.compile(r'[ ]+([^ &|])[ ]+')
multiple_operator_re = re.compile(r"[ &]+(&|\|)[ &]+")
tokens = quoted_strings_re.split(term)
processed_tokens = []
for token in tokens:
# Remove all surrounding whitespace.
token = token.strip()
if token in ['', "'"]:
continue
if token[0] != "'":
# Surround single letters with &'s
token = spaces_surrounding_letter_re.sub(r' & \1 & ', token)
# Specify '&' between words that have neither | or & specified.
token = space_between_words_re.sub(r'\1 & \2', token)
# Add a prefix wildcard to every search term.
token = re.sub(r'([^ &|]+)', r'\1:*', token)
processed_tokens.append(token)
term = " & ".join(processed_tokens)
# Replace ampersands or pipes surrounded by ampersands.
term = multiple_operator_re.sub(r" \1 ", term)
# Escape single quotes
return term.replace("'", "''")
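# Illustrative example (not part of the original module): a plain multi-word
# query becomes a prefix-matching, '&'-joined expression (in the style of a
# PostgreSQL to_tsquery term), e.g.
#   sanitize_search_term("calculo diferencial") -> "calculo:* & diferencial:*"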
class SIA:
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
cache = CacheManager(**parse_cache_config_options({
'cache.type': 'file',
'cache.data_dir': '/tmp/horariossiacache/data',
'cache.lock_dir': '/tmp/horariossiacache/lock',
'cache.regions': 'short_term, long_term',
'cache.short_term.type': 'memory',
'cache.short_term.expire': '3600',
'cache.long_term.type': 'file',
'cache.long_term.expire': '86400'
}))
def existsSubject(this,name,level):
return this.queryNumSubjectsWithName(name,level)>0
def queryNumSubjectsWithName(this,name,level):
data = json.dumps({"method": "buscador.obtenerAsignaturas", "params": [name, level, "", level, "", "", 1, 1]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())["result"]["totalAsignaturas"]
f.close()
except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
return result
@cache.region('short_term')
def querySubjectsByName(this,name,level,maxRetrieve):
data = json.dumps({"method": "buscador.obtenerAsignaturas", "params": [name, level, "", level, "", "", 1, maxRetrieve]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())
f.close()
except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
return result["result"]["asignaturas"]["list"]
@cache.region('short_term')
def queryGroupsBySubjectCode(this,code):
data = json.dumps({"method": "buscador.obtenerGruposAsignaturas", "params": [code, "0"]})
req = urllib2.Request(siaUrl + "/JSON-RPC", data, {'Content-Type': 'application/json'})
result = None
try:
f = urllib2.urlopen(req)
result = json.loads(f.read())
f.close()
except urllib2.HTTPError, e:
logging.warning('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.warning('URLError = ' + e.reason)
except httplib.HTTPException, e:
logging.warn('HTTPException')
if result:
return result["result"]["list"]
else:
return []
@staticmethod
@cache.region('short_term')
def queryGroupsProfessions(code,group):
import re
while True:
try:
f = urllib2.urlopen(siaUrl + "/service/groupInfo.pub?cod_asignatura=" + str(code) + "&grp=" + str(group))
html = f.read().decode("ISO-8859-1")
break
except urllib2.URLError, e:
if e.code == 403:
pass
else:
logging.warning(str(e))
break
except Exception, e:
logging.warning(str(e))
break
relevantSection = re.compile(r'Los planes de estudio para los cuales se ofrece esta asignatura son:</p><div><ul class="modulelist">(.*)</ul></div>').findall(html)
professions = []
if (len(relevantSection)>0):
professionsHtml = re.compile('<li><p>(.*?)</p></li>').findall(relevantSection[0])
for i in professionsHtml:
data = i.split("-")
professions.append((data[0].strip(),re.compile('<em>(.*)</em>').findall("".join(data[1:]))[0]))
return professions
|
xyos/horarios
|
horarios/helpers.py
|
Python
|
mit
| 5,854
| 0.005296
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
from bisect import bisect_right
from whoosh.fields import UnknownFieldError
from whoosh.store import LockError
from whoosh.support.filelock import try_for
from whoosh.support.externalsort import SortingPool
from whoosh.util import fib
from whoosh.writing import IndexWriter, IndexingError
# Merge policies
# A merge policy is a callable that takes the Index object, the SegmentWriter
# object, and the current segment list (not including the segment being
# written), and returns an updated segment list (not including the segment
# being written).
def NO_MERGE(writer, segments):
"""This policy does not merge any existing segments.
"""
return segments
def MERGE_SMALL(writer, segments):
"""This policy merges small segments, where "small" is defined using a
heuristic based on the fibonacci sequence.
"""
from whoosh.filedb.filereading import SegmentReader
newsegments = []
sorted_segment_list = sorted(segments, key=lambda s: s.doc_count_all())
total_docs = 0
for i, seg in enumerate(sorted_segment_list):
count = seg.doc_count_all()
if count > 0:
total_docs += count
if total_docs < fib(i + 5):
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
else:
newsegments.append(seg)
return newsegments
def OPTIMIZE(writer, segments):
"""This policy merges all existing segments.
"""
from whoosh.filedb.filereading import SegmentReader
for seg in segments:
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
return []
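# Illustrative sketch (not part of the original module): any callable with the
# (writer, segments) signature used above can act as a merge policy and be
# passed to commit(mergetype=...). This hypothetical policy merges only
# segments below a fixed document count.
def MERGE_UNDER_100(writer, segments):
    from whoosh.filedb.filereading import SegmentReader

    newsegments = []
    for seg in segments:
        if seg.doc_count_all() < 100:
            reader = SegmentReader(writer.storage, writer.schema, seg)
            writer.add_reader(reader)
            reader.close()
        else:
            newsegments.append(seg)
    return newsegments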
class PostingPool(SortingPool):
# Subclass whoosh.support.externalsort.SortingPool to use knowledge of
# postings to set run size in bytes instead of items
def __init__(self, limitmb=128, **kwargs):
SortingPool.__init__(self, **kwargs)
self.limit = limitmb * 1024 * 1024
self.currentsize = 0
def add(self, item):
# item = (fieldname, text, docnum, weight, valuestring)
size = (28 + 4 * 5 # tuple = 28 + 4 * length
+ 21 + len(item[0]) # fieldname = str = 21 + length
+ 26 + len(item[1]) * 2 # text = unicode = 26 + 2 * length
+ 18 # docnum = long = 18
+ 16 # weight = float = 16
+ 21 + len(item[4] or '')) # valuestring
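# Worked example (illustrative): for ('content', u'hello', 10, 2.0, None)
# the estimate is 48 + 28 + 36 + 18 + 16 + 21 = 167 bytes.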
self.currentsize += size
if self.currentsize > self.limit:
self.save()
self.current.append(item)
def iter_postings(self):
# This is just an alias for items() to be consistent with the
# iter_postings()/add_postings() interface of a lot of other classes
return self.items()
def save(self):
SortingPool.save(self)
self.currentsize = 0
def renumber_postings(reader, startdoc, docmap):
for fieldname, text, docnum, weight, value in reader.iter_postings():
newdoc = docmap[docnum] if docmap else startdoc + docnum
yield (fieldname, text, newdoc, weight, value)
# Writer object
class SegmentWriter(IndexWriter):
def __init__(self, ix, poolclass=None, timeout=0.0, delay=0.1, _lk=True,
limitmb=128, docbase=0, codec=None, compound=True, **kwargs):
# Lock the index
self.writelock = None
if _lk:
self.writelock = ix.lock("WRITELOCK")
if not try_for(self.writelock.acquire, timeout=timeout,
delay=delay):
raise LockError
if codec is None:
from whoosh.codec import default_codec
codec = default_codec()
self.codec = codec
# Get info from the index
self.storage = ix.storage
self.indexname = ix.indexname
info = ix._read_toc()
self.generation = info.generation + 1
self.schema = info.schema
self.segments = info.segments
self.docnum = self.docbase = docbase
self._setup_doc_offsets()
# Internals
self.compound = compound
poolprefix = "whoosh_%s_" % self.indexname
self.pool = PostingPool(limitmb=limitmb, prefix=poolprefix)
newsegment = self.newsegment = codec.new_segment(self.storage,
self.indexname)
self.is_closed = False
self._added = False
# Set up writers
self.perdocwriter = codec.per_document_writer(self.storage, newsegment)
self.fieldwriter = codec.field_writer(self.storage, newsegment)
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.newsegment)
def _setup_doc_offsets(self):
self._doc_offsets = []
base = 0
for s in self.segments:
self._doc_offsets.append(base)
base += s.doc_count_all()
def _check_state(self):
if self.is_closed:
raise IndexingError("This writer is closed")
def add_field(self, fieldname, fieldspec, **kwargs):
self._check_state()
if self._added:
raise Exception("Can't modify schema after adding data to writer")
super(SegmentWriter, self).add_field(fieldname, fieldspec, **kwargs)
def remove_field(self, fieldname):
self._check_state()
if self._added:
raise Exception("Can't modify schema after adding data to writer")
super(SegmentWriter, self).remove_field(fieldname)
def _document_segment(self, docnum):
#Returns the index.Segment object containing the given document
#number.
offsets = self._doc_offsets
if len(offsets) == 1:
return 0
return bisect_right(offsets, docnum) - 1
def _segment_and_docnum(self, docnum):
#Returns an (index.Segment, segment_docnum) pair for the segment
#containing the given document number.
segmentnum = self._document_segment(docnum)
offset = self._doc_offsets[segmentnum]
segment = self.segments[segmentnum]
return segment, docnum - offset
def has_deletions(self):
"""
Returns True if this index has documents that are marked deleted but
haven't been optimized out of the index yet.
"""
return any(s.has_deletions() for s in self.segments)
def delete_document(self, docnum, delete=True):
self._check_state()
if docnum >= sum(seg.doccount for seg in self.segments):
raise IndexingError("No document ID %r in this index" % docnum)
segment, segdocnum = self._segment_and_docnum(docnum)
segment.delete_document(segdocnum, delete=delete)
def deleted_count(self):
"""
:returns: the total number of deleted documents in the index.
"""
return sum(s.deleted_count() for s in self.segments)
def is_deleted(self, docnum):
segment, segdocnum = self._segment_and_docnum(docnum)
return segment.is_deleted(segdocnum)
def reader(self, reuse=None):
from whoosh.filedb.fileindex import FileIndex
self._check_state()
return FileIndex._reader(self.storage, self.schema, self.segments,
self.generation, reuse=reuse)
def iter_postings(self):
return self.pool.iter_postings()
def add_postings(self, lengths, items, startdoc, docmap):
# items = (fieldname, text, docnum, weight, valuestring) ...
schema = self.schema
# Make a generator to strip out deleted fields and renumber the docs
# before passing them down to the field writer
def gen():
for fieldname, text, docnum, weight, valuestring in items:
if fieldname not in schema:
continue
if docmap is not None:
newdoc = docmap[docnum]
else:
newdoc = startdoc + docnum
yield (fieldname, text, newdoc, weight, valuestring)
self.fieldwriter.add_postings(schema, lengths, gen())
def _make_docmap(self, reader, newdoc):
# If the reader has deletions, make a dictionary mapping the docnums
# of undeleted documents to new sequential docnums starting at newdoc
hasdel = reader.has_deletions()
if hasdel:
docmap = {}
for docnum in reader.all_doc_ids():
if reader.is_deleted(docnum):
continue
docmap[docnum] = newdoc
newdoc += 1
else:
docmap = None
newdoc += reader.doc_count_all()
# Return the map and the new lowest unused document number
return docmap, newdoc
def _merge_per_doc(self, reader, docmap):
schema = self.schema
newdoc = self.docnum
perdocwriter = self.perdocwriter
sharedfields = set(schema.names()) & set(reader.schema.names())
for docnum in reader.all_doc_ids():
# Skip deleted documents
if docmap and docnum not in docmap:
continue
# Renumber around deletions
if docmap:
newdoc = docmap[docnum]
# Get the stored fields
d = reader.stored_fields(docnum)
# Start a new document in the writer
perdocwriter.start_doc(newdoc)
# For each field in the document, copy its stored value,
# length, and vectors (if any) to the writer
for fieldname in sharedfields:
field = schema[fieldname]
length = (reader.doc_field_length(docnum, fieldname, 0)
if field.scorable else 0)
perdocwriter.add_field(fieldname, field, d.get(fieldname),
length)
if field.vector and reader.has_vector(docnum, fieldname):
v = reader.vector(docnum, fieldname)
perdocwriter.add_vector_matcher(fieldname, field, v)
# Finish the new document
perdocwriter.finish_doc()
newdoc += 1
def _merge_fields(self, reader, docmap):
# Add inverted index postings to the pool, renumbering document number
# references as necessary
add_post = self.pool.add
# Note: iter_postings() only yields postings for undeleted docs
for p in renumber_postings(reader, self.docnum, docmap):
add_post(p)
def add_reader(self, reader):
self._check_state()
# Make a docnum map to renumber around deleted documents
docmap, newdoc = self._make_docmap(reader, self.docnum)
# Add per-document values
self._merge_per_doc(reader, docmap)
# Add field postings
self._merge_fields(reader, docmap)
self.docnum = newdoc
self._added = True
def _check_fields(self, schema, fieldnames):
# Check if the caller gave us a bogus field
for name in fieldnames:
if name not in schema:
raise UnknownFieldError("No field named %r in %s"
% (name, schema))
def add_document(self, **fields):
self._check_state()
perdocwriter = self.perdocwriter
schema = self.schema
docnum = self.docnum
add_post = self.pool.add
docboost = self._doc_boost(fields)
fieldnames = sorted([name for name in fields.keys()
if not name.startswith("_")])
self._check_fields(schema, fieldnames)
perdocwriter.start_doc(docnum)
# For each field...
for fieldname in fieldnames:
value = fields.get(fieldname)
if value is None:
continue
field = schema[fieldname]
length = 0
if field.indexed:
# TODO: Method for adding progressive field values, ie
# setting start_pos/start_char?
fieldboost = self._field_boost(fields, fieldname, docboost)
# Ask the field to return a list of (text, weight, valuestring)
# tuples and the number of terms in the field
items = field.index(value)
# Only store the length if the field is marked scorable
scorable = field.scorable
# Add the terms to the pool
for text, freq, weight, valuestring in items:
#assert w != ""
weight *= fieldboost
if scorable:
length += freq
add_post((fieldname, text, docnum, weight, valuestring))
if field.separate_spelling():
# For fields which use different tokens for spelling, insert
# fake postings for the spellable words, where docnum=None
# means "this is a spelling word"
# TODO: think of something less hacktacular
for text in field.spellable_words(value):
add_post((fieldname, text, None, None, None))
vformat = field.vector
if vformat:
analyzer = field.analyzer
vitems = sorted(vformat.word_values(value, analyzer,
mode="index"))
perdocwriter.add_vector_items(fieldname, field, vitems)
# Figure out what value to store for this field
storedval = None
if field.stored:
storedkey = "_stored_%s" % fieldname
if storedkey in fields:
storedval = fields.get(storedkey)
else:
storedval = value
# Add the stored value and length for this field to the per-
# document writer
perdocwriter.add_field(fieldname, field, storedval, length)
perdocwriter.finish_doc()
self._added = True
self.docnum += 1
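# Illustrative usage sketch (not part of the original module), following the
# public whoosh API; "indexdir" and the field names here are hypothetical:
#   ix = index.create_in("indexdir", schema)
#   w = ix.writer()
#   w.add_document(title=u"First doc", content=u"hello world")
#   w.commit()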
def doc_count(self):
return self.docnum - self.docbase
def get_segment(self):
newsegment = self.newsegment
newsegment.doccount = self.doc_count()
return newsegment
def _merge_segments(self, mergetype, optimize, merge):
if mergetype:
pass
elif optimize:
mergetype = OPTIMIZE
elif not merge:
mergetype = NO_MERGE
else:
mergetype = MERGE_SMALL
# Call the merge policy function. The policy may choose to merge
# other segments into this writer's pool
return mergetype(self, self.segments)
def _flush_segment(self):
lengths = self.perdocwriter.lengths_reader()
postings = self.pool.iter_postings()
self.fieldwriter.add_postings(self.schema, lengths, postings)
def _close_segment(self):
self.perdocwriter.close()
self.fieldwriter.close()
self.pool.cleanup()
def _assemble_segment(self):
if self.compound:
# Assemble the segment files into a compound file
newsegment = self.get_segment()
newsegment.create_compound_file(self.storage)
newsegment.compound = True
def _commit_toc(self, segments):
# Write a new TOC with the new segment list (and delete old files)
self.codec.commit_toc(self.storage, self.indexname, self.schema,
segments, self.generation)
def _finish(self):
if self.writelock:
self.writelock.release()
self.is_closed = True
#self.storage.close()
def _partial_segment(self):
# For use by a parent multiprocessing writer: Closes out the segment
# but leaves the pool files intact so the parent can access them
self._check_state()
self.perdocwriter.close()
self.fieldwriter.close()
# Don't call self.pool.cleanup()! We want to grab the pool files.
return self.get_segment()
def commit(self, mergetype=None, optimize=False, merge=True):
"""Finishes writing and saves all additions and changes to disk.
There are four possible ways to use this method::
# Merge small segments but leave large segments, trying to
# balance fast commits with fast searching:
writer.commit()
# Merge all segments into a single segment:
writer.commit(optimize=True)
# Don't merge any existing segments:
writer.commit(merge=False)
# Use a custom merge function
writer.commit(mergetype=my_merge_function)
:param mergetype: a custom merge function taking a Writer object and
segment list as arguments, and returning a new segment list. If you
supply a ``mergetype`` function, the values of the ``optimize`` and
``merge`` arguments are ignored.
:param optimize: if True, all existing segments are merged with the
documents you've added to this writer (and the value of the
``merge`` argument is ignored).
:param merge: if False, do not merge small segments.
"""
self._check_state()
try:
# Merge old segments if necessary
finalsegments = self._merge_segments(mergetype, optimize, merge)
if self._added:
# Finish writing segment
self._flush_segment()
# Close segment files
self._close_segment()
# Assemble compound segment if necessary
self._assemble_segment()
# Add the new segment to the list of remaining segments
# returned by the merge policy function
finalsegments.append(self.get_segment())
else:
# Close segment files
self._close_segment()
# Write TOC
self._commit_toc(finalsegments)
finally:
# Final cleanup
self._finish()
def cancel(self):
self._check_state()
self._close_segment()
self._finish()
# Retroactively add spelling files to an existing index
def add_spelling(ix, fieldnames, commit=True):
"""Adds spelling files to an existing index that was created without
them, and modifies the schema so the given fields have the ``spelling``
attribute. Only works on filedb indexes.
>>> ix = index.open_dir("testindex")
>>> add_spelling(ix, ["content", "tags"])
:param ix: a :class:`whoosh.filedb.fileindex.FileIndex` object.
:param fieldnames: a list of field names to create word graphs for.
:param commit: if True (the default), commit the writer after adding the
    word graph files.
"""
from whoosh.filedb.filereading import SegmentReader
from whoosh.support import dawg
writer = ix.writer()
storage = writer.storage
schema = writer.schema
segments = writer.segments
for segment in segments:
r = SegmentReader(storage, schema, segment)
f = segment.create_file(storage, ".dag")
gw = dawg.GraphWriter(f)
for fieldname in fieldnames:
gw.start_field(fieldname)
for word in r.lexicon(fieldname):
gw.insert(word)
gw.finish_field()
gw.close()
for fieldname in fieldnames:
schema[fieldname].spelling = True
if commit:
writer.commit(merge=False)
|
mozilla/popcorn_maker
|
vendor-local/lib/python/whoosh/filedb/filewriting.py
|
Python
|
bsd-3-clause
| 21,271
| 0.000705
|
import sublime
import unittest
import os
import sys
class TestImport(unittest.TestCase):
mpath = None
@classmethod
def setUpClass(cls):
basedir = os.path.dirname(__file__)
mpath = os.path.normpath(os.path.join(
basedir, "..", "st3_{}_{}".format(sublime.platform(), sublime.arch())))
if mpath not in sys.path:
cls.mpath = mpath
sys.path.append(mpath)
def test_import(self):
from winpty import PtyProcess
self.assertTrue("winpty" in sys.modules)
proc = PtyProcess.spawn('cmd.exe')
self.assertTrue(proc.isalive())
proc.terminate(True)
@classmethod
def tearDownClass(cls):
if not cls.mpath:
return
mpath = cls.mpath
if mpath in sys.path:
sys.path.remove(mpath)
if "winpty" in sys.modules:
del sys.modules["winpty"]
|
dmilith/SublimeText3-dmilith
|
Packages/Debugger/modules/libs/pywinpty/tests/test_import.py
|
Python
|
mit
| 909
| 0.0011
|
#!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Example of a urllib2 based HTTP request handler."""
from pprint import pprint
from StringIO import StringIO
import sys
import urllib2
import splunk.client as client
import utils
def request(url, message, **kwargs):
method = message['method'].lower()
data = message.get('body', "") if method == 'post' else None
headers = dict(message.get('headers', []))
context = urllib2.Request(url, data, headers)
try:
response = urllib2.urlopen(context)
except urllib2.HTTPError, response:
pass # Propagate HTTP errors via the returned response message
return {
'status': response.code,
'reason': response.msg,
'headers': response.info().dict,
'body': StringIO(response.read())
}
opts = utils.parse(sys.argv[1:], {}, ".splunkrc")
service = client.connect(handler=request, **opts.kwargs)
pprint(service.apps.list())
|
rmak/splunk-sdk-python
|
examples/handlers/handler_urllib2.py
|
Python
|
apache-2.0
| 1,492
| 0.002681
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
"""Tests for reportlab.lib.utils
"""
__version__=''' $Id$ '''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import os, time, sys
import reportlab
import unittest
from reportlab.lib import colors
from reportlab.lib.utils import recursiveImport, recursiveGetAttr, recursiveSetAttr, rl_isfile, \
isCompactDistro, isPy3
def _rel_open_and_read(fn):
from reportlab.lib.utils import open_and_read
from reportlab.lib.testutils import testsFolder
cwd = os.getcwd()
os.chdir(testsFolder)
try:
return open_and_read(fn)
finally:
os.chdir(cwd)
class ImporterTestCase(unittest.TestCase):
"Test import utilities"
count = 0
def setUp(self):
from reportlab.lib.utils import get_rl_tempdir
s = repr(int(time.time())) + repr(self.count)
self.__class__.count += 1
self._tempdir = get_rl_tempdir('reportlab_test','tmp_%s' % s)
if not os.path.isdir(self._tempdir):
os.makedirs(self._tempdir,0o700)
_testmodulename = os.path.join(self._tempdir,'test_module_%s.py' % s)
f = open(_testmodulename,'w')
f.write('__all__=[]\n')
f.close()
if sys.platform=='darwin' and isPy3:
time.sleep(0.3)
self._testmodulename = os.path.splitext(os.path.basename(_testmodulename))[0]
def tearDown(self):
from shutil import rmtree
rmtree(self._tempdir,1)
def test1(self):
"try stuff known to be in the path"
m1 = recursiveImport('reportlab.pdfgen.canvas')
import reportlab.pdfgen.canvas
assert m1 == reportlab.pdfgen.canvas
def test2(self):
"try under a well known directory NOT on the path"
from reportlab.lib.testutils import testsFolder
D = os.path.join(testsFolder,'..','tools','pythonpoint')
fn = os.path.join(D,'stdparser.py')
if rl_isfile(fn) or rl_isfile(fn+'c') or rl_isfile(fn+'o'):
m1 = recursiveImport('stdparser', baseDir=D)
def test3(self):
"ensure CWD is on the path"
try:
cwd = os.getcwd()
os.chdir(self._tempdir)
m1 = recursiveImport(self._testmodulename)
finally:
os.chdir(cwd)
def test4(self):
"ensure noCWD removes current dir from path"
try:
cwd = os.getcwd()
os.chdir(self._tempdir)
import sys
try:
del sys.modules[self._testmodulename]
except KeyError:
pass
self.assertRaises(ImportError,
recursiveImport,
self._testmodulename,
noCWD=1)
finally:
os.chdir(cwd)
def test5(self):
"recursive attribute setting/getting on modules"
import reportlab.lib.units
inch = recursiveGetAttr(reportlab, 'lib.units.inch')
assert inch == 72
recursiveSetAttr(reportlab, 'lib.units.cubit', 18*inch)
cubit = recursiveGetAttr(reportlab, 'lib.units.cubit')
assert cubit == 18*inch
def test6(self):
"recursive attribute setting/getting on drawings"
from reportlab.graphics.charts.barcharts import sampleH1
drawing = sampleH1()
recursiveSetAttr(drawing, 'barchart.valueAxis.valueMax', 72)
theMax = recursiveGetAttr(drawing, 'barchart.valueAxis.valueMax')
assert theMax == 72
def test7(self):
"test open and read of a simple relative file"
b = _rel_open_and_read('../docs/images/Edit_Prefs.gif')
def test8(self):
"test open and read of a relative file: URL"
b = _rel_open_and_read('file:../docs/images/Edit_Prefs.gif')
def test9(self):
"test open and read of an http: URL"
from reportlab.lib.utils import open_and_read
b = open_and_read('http://www.reportlab.com/rsrc/encryption.gif')
def test10(self):
"test open and read of a simple relative file"
from reportlab.lib.utils import open_and_read, getBytesIO
b = getBytesIO(_rel_open_and_read('../docs/images/Edit_Prefs.gif'))
b = open_and_read(b)
def test11(self):
"test open and read of an RFC 2397 data URI with base64 encoding"
result = _rel_open_and_read('data:image/gif;base64,R0lGODdhAQABAIAAAP///////ywAAAAAAQABAAACAkQBADs=')
self.assertEquals(result,b'GIF87a\x01\x00\x01\x00\x80\x00\x00\xff\xff\xff\xff\xff\xff,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;')
def test12(self):
"test open and read of an RFC 2397 data URI without an encoding"
result = _rel_open_and_read('data:text/plain;,Hello%20World')
self.assertEquals(result,b'Hello World')
def testRecursiveImportErrors(self):
"check we get useful error messages"
try:
m1 = recursiveImport('reportlab.pdfgen.brush')
self.fail("Imported a nonexistent module")
except ImportError as e:
self.assertIn('reportlab.pdfgen.brush',str(e))
try:
m1 = recursiveImport('totally.non.existent')
self.fail("Imported a nonexistent module")
except ImportError as e:
self.assertIn('totally',str(e))
try:
#import a module in the 'tests' directory with a bug
m1 = recursiveImport('unimportable')
self.fail("Imported a buggy module")
except Exception as e:
self.assertIn(reportlab.isPy3 and 'division by zero' or 'integer division or modulo by zero',str(e))
def makeSuite():
return makeSuiteForClasses(ImporterTestCase)
if __name__ == "__main__": #noruntests
unittest.TextTestRunner().run(makeSuite())
printLocation()
|
kanarelo/reportlab
|
tests/test_lib_utils.py
|
Python
|
bsd-3-clause
| 5,924
| 0.006752
|
# ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
import pypom
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from features.pages.common import CommonPageMixin
from features.fields.fields import InputField, SelectField, ButtonField
class SearchEntityPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/entities/'
acronym = InputField(By.ID, 'id_acronym')
title = InputField(By.ID, 'id_title')
entity_type = SelectField(By.ID, "id_entity_type")
search = ButtonField(By.ID, "bt_submit_entity_search")
def find_acronym_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_entity_%d' % row).text
class SearchOrganizationPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/organizations/'
acronym = InputField(By.ID, 'id_acronym')
name = InputField(By.ID, 'id_name')
type = SelectField(By.ID, "id_type")
search = ButtonField(By.ID, "bt_submit_organization_search")
def find_acronym_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_organization_%d' % row).text
class SearchStudentPage(CommonPageMixin, pypom.Page):
URL_TEMPLATE = '/students/'
registration_id = InputField(By.ID, 'id_registration_id')
name = InputField(By.ID, 'id_name')
search = ButtonField(By.ID, "bt_submit_student_search")
def find_registration_id_in_table(self, row: int = 1):
return self.find_element(By.ID, 'td_student_%d' % row).text
def find_name_in_table(self):
names = []
row = 1
last = False
while not last:
try:
elt = self.find_element(By.ID, 'spn_student_name_%d' % row)
names.append(elt.text)
row += 1
except NoSuchElementException as e:
return names
return names
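# --- Hypothetical usage sketch (not part of the original file) ---
# A minimal example of how a page object defined above might be driven
# from a behave step, assuming a selenium `driver` and a `base_url` are
# available and that the InputField/ButtonField descriptors (defined in
# features.fields.fields, not shown here) support plain assignment for
# typing and .click() for clicking.
#
# page = SearchStudentPage(driver, base_url).open()
# page.registration_id = "12345678"
# page.search.click()
# assert page.find_registration_id_in_table(row=1) == "12345678"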
|
uclouvain/OSIS-Louvain
|
features/steps/utils/pages.py
|
Python
|
agpl-3.0
| 3,065
| 0.000326
|
# -*- coding: utf-8 -*-
#
# tm1640-rpi documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 12 19:52:17 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src/python/'))
# hack for readthedocs to cause it to run doxygen first
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
from subprocess import call
call('doxygen')
del call
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'breathe']
breathe_projects = {'tm1640-rpi': 'doxygen-xml/'}
breathe_default_project = 'tm1640-rpi'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tm1640-rpi'
copyright = u'2013, Michael Farrell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tm1640-rpidoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tm1640-rpi.tex', u'tm1640-rpi Documentation',
u'Michael Farrell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
[u'Michael Farrell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tm1640-rpi', u'tm1640-rpi Documentation',
u'Michael Farrell', 'tm1640-rpi', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
autoclass_content = 'both'
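# --- Illustrative note (not part of the original configuration) ---
# With the breathe settings above, the Doxygen XML generated into
# doxygen-xml/ can be pulled into the reST sources with breathe
# directives, for example:
#
#   .. doxygenfunction:: some_c_function
#      :project: tm1640-rpi
#
# `some_c_function` is a placeholder; any symbol present in the Doxygen
# XML output would work.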
|
micolous/tm1640-rpi
|
doc/source/conf.py
|
Python
|
gpl-3.0
| 8,144
| 0.007859
|
# encoding: utf-8
import json
import time
from kubectl_data import *
from kubectl_ports import *
from kubectl_wrapper import *
TMP_FILEPATH = '/tmp/'
def create_tmp_json(data, service_path):
with open(service_path, 'w') as out:
json.dump(data, out, indent=2)
def sub_start(service_name, data, kube_type):
filepath = TMP_FILEPATH + service_name + '-' + kube_type + '.json'
kube_data = data.get(kube_type, dict())
create_tmp_json(kube_data, filepath)
create(filepath)
def sub_stop(service_name, data, kube_type):
filepath = TMP_FILEPATH + service_name + '-' + kube_type + '.json'
kube_data = data.get(kube_type, dict())
create_tmp_json(kube_data, filepath)
delete(filepath)
'''
Actions
'''
def kubectl_used_ports(subdomain):
return get_used_ports(subdomain)
def kubectl_available_ports(subdomain):
return get_available_ports(subdomain)
def kubectl_register(filepath):
data = get_data_yaml(filepath)
register_data(data)
def kubectl_start(service_name):
data = get_data(service_name)
sub_start(service_name, data, 'service')
time.sleep(1)
sub_start(service_name, data, 'replicationcontroller')
def kubectl_stop(service_name):
data = get_data(service_name)
sub_stop(service_name, data, 'replicationcontroller')
sub_stop(service_name, data, 'service')
time.sleep(1)
def kubectl_list():
return get_all_names()
def kubectl_startall():
services = get_all_names()
for service in services:
kubectl_start(service)
def kubectl_status(ressources, all_namespaces):
return status(ressources, all_namespaces)
def kubectl_status_nodes():
return nodes()
def kubectl_logs(service_name, f):
pods = pods_name_from_label(service_name)
pods_list = filter(lambda x: x != '', pods.split('\n'))
if not pods_list:
print 'No pods found'
return
elif len(pods_list) > 1:
format_list = '\n'.join(pods_list) + '\n\nName: '
answer = raw_input('Multiple pods under this service, please choose one by selecting the name: \n' + format_list)
return logs(answer, f)
else:
pod_name = pods_list[0].split(' ')[0]
return logs(pod_name, f)
def kubectl_describe(service_name):
found_pods_and_exec_func(service_name, describe)
def kubectl_connect(service_name):
found_pods_and_exec_func(service_name, connect)
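# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how the actions above compose: register a service description,
# then start its Kubernetes service and replication controller. The file
# path and service name are made-up examples.
#
# kubectl_register('/path/to/my-service.yaml')  # parse and store the yaml
# print kubectl_list()                          # registered service names
# kubectl_start('my-service')                   # create service, then rc
# kubectl_stop('my-service')                    # delete rc, then service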
|
snipsco/teleport
|
src/kubeconfig/kubectl_actions.py
|
Python
|
mit
| 2,287
| 0.020988
|
"""
Configuration for bookmarks Django app
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from edx_django_utils.plugins import PluginSettings, PluginURLs
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
class BookmarksConfig(AppConfig):
"""
Configuration class for bookmarks Django app
"""
name = 'openedx.core.djangoapps.bookmarks'
verbose_name = _("Bookmarks")
plugin_app = {
PluginURLs.CONFIG: {
ProjectType.LMS: {
PluginURLs.NAMESPACE: '',
PluginURLs.REGEX: '^api/bookmarks/',
PluginURLs.RELATIVE_PATH: 'urls',
}
},
PluginSettings.CONFIG: {
ProjectType.LMS: {
SettingsType.PRODUCTION: {PluginSettings.RELATIVE_PATH: 'settings.production'},
SettingsType.COMMON: {PluginSettings.RELATIVE_PATH: 'settings.common'},
}
}
}
def ready(self):
# Register the signals handled by bookmarks.
from . import signals # lint-amnesty, pylint: disable=unused-import
|
eduNEXT/edunext-platform
|
openedx/core/djangoapps/bookmarks/apps.py
|
Python
|
agpl-3.0
| 1,146
| 0.001745
|
import logging
from pylons import config, request, response, session, tmpl_context as c
from pylons.controllers.util import abort
from fmod.lib.base import BaseController, render
from fmod import model
from sqlalchemy import desc
log = logging.getLogger(__name__)
from hashlib import md5
import time, datetime
#useful for this case.
from fmod.model import Ping, ImageHistory
from flickrapi import FlickrAPI
class PingController(BaseController):
def index(self):
c.results=[]
c.username = session.get('user')
c.fl_mod = session.get('mod',False)
images = {}
flSave = False
for ping in Ping.query().filter(Ping.fl_decided==False).order_by(Ping.id):
if not images.get(ping.image):
img = ping.Image_fromPing()
if img.in_pool():
images[ping.image] = True
c.results.append(ping)
if len(c.results) >= 2:
break
else:
flSave=True
ping.fl_decided=True
if flSave: ping.commit()
return render('ping.mako')
def more(self, id=None):
# id will be something like d_ping_[ping.id]
# so, I want to get a ping where id > that one.
pid = id.split('_')[-1]
try:
pid = int(pid)
except:
log.debug("couldn't identify the ping %s "%id)
return ""
c.username = session.get('user')
c.fl_mod = session.get('mod',False)
filter_images = dict([(ping.image,True) for ping in
Ping.query().filter(Ping.fl_decided==False).filter(Ping.id<=pid)])
for ping in Ping.query().filter(Ping.fl_decided==False).filter(Ping.id>pid).order_by(Ping.id):
if not ping.image in filter_images:
img = ping.Image_fromPing()
if img.in_pool():
c.ping=ping
c.image=ping.image
c.atts = img.all_atts()
return render('one_ping.mako')
else:
ping.fl_decided=True
ping.commit()
def _fmtTime(self, t=None):
if t!= None and hasattr(t, 'timetuple'):
t = time.mktime(t.timetuple())
return time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(t))
def rss(self):
response.charset='utf8'
response.headers['content-type'] = 'text/xml; charset=UTF-8'
c.items=[]
images = {}
for ping in Ping.query().filter(Ping.fl_decided==False).order_by(desc(Ping.id)):
if not images.get(ping.image):
img = ping.Image_fromPing()
if img.in_pool():
images[ping.image] = True
img.all_atts()
c.items.append((ping,img))
                    if len(c.items) >= 20:
break
c.fmtTime = self._fmtTime
return render('rss.mako')
def ping(self):
log.debug('In Ping')
params = {'nsid':'nsid', # the pinging user, this is static.
'uid':'username', # our userid
'id' :'image', # image id
'own':'owner', # image owner
'sec':'secret', # image secret, from flickr
'con':'context', # context - in group pool
}
# 's':None # signature
# check sig --
nsid = request.params.get('nsid')
if nsid:
u = model.User.get_byNsid(nsid)
else:
u = model.User.get_byName(request.params.get('uid'))
if not u:
log.debug('user not found for ping: %s'%request.query_string)
return ''
log.debug(request.query_string)
log.debug(request.query_string[:-35]+u.secret)
log.debug(request.params.get('s'))
log.debug(md5(request.query_string[:-35]+u.secret).hexdigest().lower())
if md5(request.query_string[:-35]+u.secret).hexdigest().lower() != request.params.get('s'):
log.debug('bad signature')
return ''
else:
log.debug('good signature')
p = Ping()
for (arg, att) in params.items():
# must filter!!!
val = request.params.get(arg,'')
log.debug("setting %s to %s"% (att, val))
if val:
setattr(p, att, val)
p.username = u.username
#p.begin()
p.save()
p.commit()
if request.params.get('v',False) == '2':
#version 2 response.
response.headers['content-type'] = 'text/javascript'
return """YUI().use('node', function(Y) {Y.one('#context-num-pool-71917374__at__N00').insert(document.createTextNode(' (Flagged) '), 'before')})"""
else:
#version 1 response
""" q='uid='+uid+'&id='+p.id+'&own='+p.ownerNsid+'&sec='+p.secret+'&con='+nextprev_currentContextID;
i.src='http://192.168.10.99:5000/p?'+q+'s='+md5_calcMD5(q+s);
"""
response.headers['content-type'] = 'text/javascript'
return """Y.D.get('contextTitle_pool71917374@N00').appendChild(document.createTextNode('(Flagged)'))"""
def dup_scan(self):
log.debug('dup ping')
fapi = FlickrAPI(config['api_key'], config['api_secret'], token=config['api_token'])
try:
rsp = fapi.groups_pools_getPhotos(api_key=config['api_key'],
group_id=config['group_id'],
extras='last_update',
per_page='50',
page='1',
token=config['api_token'])
except Exception,msg:
log.debug(msg.args)
return False
photos = rsp.find('photos')
for photo in photos.getchildren():
image = photo.get('id')
dt = int(photo.get('dateadded'))
if ImageHistory.get(image=image, dt=dt):
log.debug('found high water mark, quitting')
break
if ImageHistory.get_all(image=image):
log.debug('found a re-add')
p = Ping()
p.image = image
p.owner = photo.get('owner')
p.reason = "Bump"
p.username = 'RoboMod'
p.save()
Ping.commit()
ih = ImageHistory()
ih.image = image
ih.dt = dt
ih.save()
ImageHistory.commit()
return "successful"
|
wiredfool/fmod
|
fmod/controllers/ping.py
|
Python
|
gpl-2.0
| 5,311
| 0.046131
|
from api import ServerError,NoAccessError,SimpleTax
|
SimpleTax/python-simpletax
|
simpletax/__init__.py
|
Python
|
bsd-3-clause
| 52
| 0.038462
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'CVtek dev'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "CVTek dev"
__status__ = "Development"
__model_name__ = 'sr_crianca.SRCrianca'
import auth, base_models
from orm import *
from form import *
class SRCrianca(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'sr_crianca'
self.__title__ ='Inscrição e Identificação da Criança'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
        self.__get_options__ = ['nome'] # also defines the field to be shown in the m2m, regardless of the description on the m2m field
self.__order_by__ = 'sr_crianca.nome'
self.__tabs__ = [
('Pré-Natal', ['sr_pre_natal']),
('Neo-Natal', ['sr_neo_natal']),
('Irmãos', ['sr_crianca']),
]
        # choice field with the health facility structure
self.numero_inscricao = integer_field(view_order = 1, name = 'Nº de Inscrição', size = 40)
self.primeira_consulta = date_field(view_order = 2, name = 'Primeira Consulta', size=40, args = 'required', default = datetime.date.today(), onlist = False)
self.nome = string_field(view_order = 3, name = 'Nome', size = 70, onlist = True)
self.sexo = combo_field(view_order = 4, name = 'Sexo', size = 40, default = 'Feminino', options = [('feminino','Feminino'), ('masculino','Masculino')], onlist = True)
self.data_nascimento = date_field(view_order = 5, name = 'Data Nascimento', size=40, args = 'required', onlist = True)
self.hora_nascimento = time_field(view_order=7, name ='Hora Nascimento', size=40, onlist=False, args='required')
self.numero_registo = string_field(view_order = 8, name = 'Nº Registo', size = 40, onlist = False)
self.data_registo = date_field(view_order = 9, name = 'Data Registo', size=40, args = 'required')
self.nome_pai = string_field(view_order = 10, name = 'Nome do Pai', size = 60, onlist=False)
self.nome_mae = string_field(view_order = 11, name = 'Nome do Mãe', size = 60)
self.endereco_familia = text_field(view_order=12, name='Endereço Familia', size=70, args="rows=30", onlist=False, search=False)
self.telefone = string_field(view_order = 13, name = 'Telefone', size = 40, onlist = True)
self.estado = combo_field(view_order = 14, name = 'Estado', size = 40, default = 'active', options = [('active','Activo'), ('canceled','Cancelado')], onlist = True)
self.sr_pre_natal = list_field(view_order=15, name = 'Informações Pré-Natal', fields=['duracao_gravidez'], condition="crianca='{id}'", model_name='sr_pre_natal.SRPreNatal', list_edit_mode='inline', onlist = False)
self.sr_neo_natal = list_field(view_order=16, name = 'Informações Neo-Natal', column='local_parto', condition="sr_crianca='{id}'", model_name='sr_neo_natal.SRNeoNatal', list_edit_mode='inline', onlist = False)
|
IdeaSolutionsOnline/ERP4R
|
core/objs/sr_crianca.py
|
Python
|
mit
| 3,036
| 0.044069
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on 07.04.2015
@author: marscher
'''
import unittest
import tempfile
import numpy as np
from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
from pyemma.util.log import getLogger
import shutil
class TestNumPyFileReader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.logger = getLogger(cls.__class__.__name__)
d = np.arange(3 * 100).reshape((100, 3))
d2 = np.arange(300, 900).reshape((200,3))
d_1d = np.random.random(100)
cls.dir = tempfile.mkdtemp(prefix='pyemma_npyreader')
cls.f1 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
cls.f2 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
cls.f3 = tempfile.mktemp(suffix='.npz', dir=cls.dir)
cls.f4 = tempfile.mktemp(suffix='.npy', dir=cls.dir)
# 2d
np.save(cls.f1, d)
np.save(cls.f4, d2)
# 1d
np.save(cls.f2, d_1d)
np.savez(cls.f3, d, d)
cls.files2d = [cls.f1, cls.f4] #cls.f3]
cls.files1d = [cls.f2]
cls.d = d
cls.d_1d = d_1d
cls.npy_files = [f for f in cls.files2d if f.endswith('.npy')]
cls.npz = cls.f3
return cls
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.dir, ignore_errors=True)
def test_only_npy(self):
reader = NumPyFileReader(self.npy_files)
from_files = [np.load(f) for f in self.npy_files]
concatenated = np.vstack(from_files)
output = reader.get_output()
self.assertEqual(reader.number_of_trajectories(), len(self.npy_files))
self.assertEqual(reader.n_frames_total(), concatenated.shape[0])
for x, y in zip(output, from_files):
np.testing.assert_array_almost_equal(x, y)
def test_small_chunks(self):
reader = NumPyFileReader(self.npy_files)
reader.chunksize = 30
from_files = [np.load(f) for f in self.npy_files]
concatenated = np.vstack(from_files)
output = reader.get_output()
self.assertEqual(reader.number_of_trajectories(), len(self.npy_files))
self.assertEqual(reader.n_frames_total(), concatenated.shape[0])
for x, y in zip(output, from_files):
np.testing.assert_array_almost_equal(x, y)
def testSingleFile(self):
reader = NumPyFileReader(self.npy_files[0])
self.assertEqual(reader.n_frames_total(), self.d.shape[0])
@unittest.skip("npz currently unsupported")
def test_npz(self):
reader = NumPyFileReader(self.npz)
all_data = reader.get_output()
fh = np.load(self.npz)
data = [x[1] for x in fh.items()]
fh.close()
self.assertEqual(reader.number_of_trajectories(), len(data))
for outp, inp in zip(all_data, data):
np.testing.assert_equal(outp, inp)
def test_stridden_access(self):
reader = NumPyFileReader(self.f1)
reader.chunksize = 10
wanted = np.load(self.f1)
for stride in [2, 3, 5, 7, 15]:
first_traj = reader.get_output(stride=stride)[0]
np.testing.assert_equal(first_traj, wanted[::stride],
"did not match for stride %i" % stride)
def test_lagged_stridden_access(self):
reader = NumPyFileReader(self.f1)
strides = [2, 3, 5, 7, 15]
lags = [1, 3, 7, 10, 30]
for stride in strides:
for lag in lags:
chunks = []
for _, _, Y in reader.iterator(stride, lag):
chunks.append(Y)
chunks = np.vstack(chunks)
np.testing.assert_equal(chunks, self.d[lag::stride])
def test_lagged_stridden_access_multiple_files(self):
reader = NumPyFileReader(self.files2d)
print reader.trajectory_lengths()
strides = [2, 3, 5, 7, 15]
lags = [1, 3, 7, 10, 30]
for stride in strides:
for lag in lags:
chunks = {i: [] for i in xrange(reader.number_of_trajectories())}
for itraj, _, Y in reader.iterator(stride, lag):
chunks[itraj].append(Y)
for i, k in enumerate(chunks.itervalues()):
stack = np.vstack(k)
d = np.load(self.files2d[i])
np.testing.assert_equal(stack, d[lag::stride],
"not equal for stride=%i"
" and lag=%i" % (stride, lag))
if __name__ == "__main__":
unittest.main()
|
arokem/PyEMMA
|
pyemma/coordinates/tests/test_numpyfilereader.py
|
Python
|
bsd-2-clause
| 5,975
| 0.002343
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-03 08:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('snapventure', '0004_auto_20161102_2043'),
]
operations = [
migrations.CreateModel(
name='Inscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('journey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Journey')),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bio', models.TextField(blank=True, max_length=500)),
('location', models.CharField(blank=True, max_length=30)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='inscription',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='snapventure.Profile'),
),
migrations.AddField(
model_name='journey',
name='inscriptions',
field=models.ManyToManyField(through='snapventure.Inscription', to='snapventure.Profile'),
),
]
|
DomDomPow/snapventure
|
snapventure-backend/snapventure/migrations/0005_auto_20161103_0856.py
|
Python
|
gpl-3.0
| 1,892
| 0.003171
|
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 2014/09/27 12:51:43 garyo"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return list(filter(is_Dir, nodes))
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
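# --- Hypothetical usage sketch (not part of the original file) ---
# DirScanner() is typically attached as the source scanner of a Builder
# whose targets depend on a directory's contents, so the target rebuilds
# when files are added or removed. The builder below is illustrative only.
#
# import SCons.Builder
# tarball = SCons.Builder.Builder(action='tar -cf $TARGET $SOURCES',
#                                 source_scanner=DirScanner())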
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
stonekyx/binary
|
vendor/scons-local-2.3.4/SCons/Scanner/Dir.py
|
Python
|
gpl-3.0
| 3,751
| 0.002399
|
# Copyright 2004-2010 PyTom <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import renpy
import codecs
import os
import os.path
import time
image_prefixes = None
filenames = None
# Things to check in lint.
#
# Image files exist, and are of the right case.
# Jump/Call targets defined.
# Say whos can evaluate.
# Call followed by say.
# Show/Scene valid.
# At valid.
# With valid.
# Hide maybe valid.
# Expressions can compile.
# The node the report will be about:
report_node = None
# Reports a message to the user.
def report(msg, *args):
if report_node:
out = u"%s:%d " % (renpy.parser.unicode_filename(report_node.filename), report_node.linenumber)
else:
out = ""
out += msg % args
print
print out.encode('utf-8')
added = { }
# Reports additional information about a message, the first time it
# occurs.
def add(msg):
if not msg in added:
added[msg] = True
print unicode(msg).encode('utf-8')
# Tries to evaluate an expression, announcing an error if it fails.
def try_eval(where, expr, additional=None):
try:
renpy.python.py_eval(expr)
except:
report( "Could not evaluate '%s', in %s.", expr, where)
if additional:
add(additional)
# Returns True if the expression can be compiled as python, False
# otherwise.
def try_compile(where, expr):
try:
renpy.python.py_compile_eval_bytecode(expr)
except:
report("'%s' could not be compiled as a python expression, %s.", expr, where)
# This reports an error if we're sure that the image with the given name
# does not exist.
def image_exists(name, expression, tag):
# Add the tag to the set of known tags.
tag = tag or name[0]
image_prefixes[tag] = True
if expression:
return
name = list(name)
names = " ".join(name)
while name:
if tuple(name) in renpy.exports.images:
return
name.pop()
report("The image named '%s' was not declared.", names)
# Only check each file once.
check_file_cache = { }
def check_file(what, fn):
present = check_file_cache.get(fn, None)
if present is True:
return
if present is False:
report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
return
if not renpy.loader.loadable(fn):
report("%s uses file '%s', which is not loadable.", what.capitalize(), fn)
check_file_cache[fn] = False
return
check_file_cache[fn] = True
try:
renpy.loader.transfn(fn)
except:
return
if renpy.loader.transfn(fn) and \
fn.lower() in filenames and \
fn != filenames[fn.lower()]:
report("Filename case mismatch for %s. '%s' was used in the script, but '%s' was found on disk.", what, fn, filenames[fn.lower()])
add("Case mismatches can lead to problems on Mac, Linux/Unix, and when archiving images. To fix them, either rename the file on disk, or the filename use in the script.")
def check_displayable(what, d):
files = [ ]
def files_callback(img):
files.extend(img.predict_files())
d.predict(files_callback)
for fn in files:
check_file(what, fn)
# Lints ast.Image nodes.
def check_image(node):
name = " ".join(node.imgname)
check_displayable('image %s' % name, renpy.exports.images[node.imgname])
def imspec(t):
if len(t) == 3:
        return t[0], None, None, t[1], t[2], 0, None
if len(t) == 6:
return t[0], t[1], t[2], t[3], t[4], t[5], None
else:
return t
# Lints ast.Show and ast.Scene nodes.
def check_show(node):
# A Scene may have an empty imspec.
if not node.imspec:
return
name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)
if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
report("Uses layer '%s', which is not in config.layers.", layer)
image_exists(name, expression, tag)
for i in at_list:
try_eval("the at list of a scene or show statment", i, "Perhaps you forgot to declare, or misspelled, a position?")
# Lints ast.Hide.
def check_hide(node):
name, expression, tag, at_list, layer, zorder, behind = imspec(node.imspec)
tag = tag or name[0]
if layer not in renpy.config.layers and layer not in renpy.config.top_layers:
report("Uses layer '%s', which is not in config.layers.", layer)
if tag not in image_prefixes:
report("The image tag '%s' is not the prefix of a declared image, nor was it used in a show statement before this hide statement.", tag)
# for i in at_list:
# try_eval(node, "at list of hide statment", i)
def check_with(node):
try_eval("a with statement or clause", node.expr, "Perhaps you forgot to declare, or misspelled, a transition?")
def check_user(node):
def error(msg):
report("%s", msg)
renpy.exports.push_error_handler(error)
try:
node.call("lint")
finally:
renpy.exports.pop_error_handler()
try:
node.get_next()
except:
report("Didn't properly report what the next statement should be.")
check_text_tags = renpy.display.text.check_text_tags
def text_checks(s):
msg = renpy.display.text.check_text_tags(s)
if msg:
report("%s (in %s)", msg, repr(s)[1:])
if "%" in s:
state = 0
pos = 0
fmt = ""
while pos < len(s):
c = s[pos]
pos += 1
# Not in a format.
if state == 0:
if c == "%":
state = 1
fmt = "%"
# In a format.
elif state == 1:
fmt += c
if c == "(":
state = 2
elif c in "#0123456780- +hlL":
state = 1
elif c in "diouxXeEfFgGcrs%":
state = 0
else:
report("Unknown string format code '%s' (in %s)", fmt, repr(s)[1:])
state = 0
# In a mapping key.
elif state == 2:
fmt += c
if c == ")":
state = 1
if state != 0:
report("Unterminated string format code '%s' (in %s)", fmt, repr(s)[1:])
def check_say(node):
if node.who:
try_eval("the who part of a say statement", node.who, "Perhaps you forgot to declare a character?")
if node.with_:
try_eval("the with clause of a say statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")
text_checks(node.what)
def check_menu(node):
if node.with_:
try_eval("the with clause of a menu statement", node.with_, "Perhaps you forgot to declare, or misspelled, a transition?")
if not [ (l, c, b) for l, c, b in node.items if b ]:
report("The menu does not contain any selectable choices.")
for l, c, b in node.items:
if c:
try_compile("in the if clause of a menuitem", c)
text_checks(l)
def check_jump(node):
if node.expression:
return
if not renpy.game.script.has_label(node.target):
report("The jump is to nonexistent label '%s'.", node.target)
def check_call(node):
# if not isinstance(node.next.name, basestring):
# report(node, "The call does not have a from clause associated with it.")
# add("You can add from clauses to calls automatically by running the add_from program.")
# add("This is necessary to ensure saves can be loaded even when the script changes.")
if node.expression:
return
if not renpy.game.script.has_label(node.label):
report("The call is to nonexistent label '%s'.", node.label)
def check_while(node):
try_compile("in the condition of the while statement", node.condition)
def check_if(node):
for condition, block in node.entries:
try_compile("in a condition of the if statement", condition)
def check_style(name, s):
if s.indexed:
for i in s.indexed:
            check_style(name + "[%r]" % (i,), s.indexed[i])
for p in s.properties:
for k, v in p.iteritems():
kname = name + "." + k
# Treat font specially.
if k.endswith("font"):
check_file(name, v)
e = renpy.style.expansions[k]
# We only need to check the first function.
for prio, propn, func in e:
if func:
v = func(v)
break
if isinstance(v, renpy.display.core.Displayable):
check_displayable(kname, v)
def check_styles():
for name, s in renpy.style.style_map.iteritems():
check_style("Style property style." + name, s)
def lint():
"""
The master lint function, that's responsible for staging all of the
other checks.
"""
renpy.game.lint = True
print codecs.BOM_UTF8
print unicode(renpy.version + " lint report, generated at: " + time.ctime()).encode("utf-8")
# This is used to support the check_image.
global filenames
filenames = { }
for d in renpy.config.searchpath:
for fn in os.listdir(os.path.join(renpy.config.basedir, d)):
filenames[fn.lower()] = fn
# This supports check_hide.
global image_prefixes
image_prefixes = { }
for k in renpy.exports.images:
image_prefixes[k[0]] = True
# Iterate through every statement in the program, processing
# them. We sort them in filename, linenumber order.
all_stmts = [ (i.filename, i.linenumber, i) for i in renpy.game.script.all_stmts ]
all_stmts.sort()
say_words = 0
say_count = 0
menu_count = 0
global report_node
for fn, ln, node in all_stmts:
report_node = node
if isinstance(node, renpy.ast.Image):
check_image(node)
elif isinstance(node, renpy.ast.Show):
check_show(node)
elif isinstance(node, renpy.ast.Scene):
check_show(node)
elif isinstance(node, renpy.ast.Hide):
check_hide(node)
elif isinstance(node, renpy.ast.With):
check_with(node)
elif isinstance(node, renpy.ast.Say):
check_say(node)
say_count += 1
say_words += len(node.what.split())
elif isinstance(node, renpy.ast.Menu):
check_menu(node)
menu_count += 1
elif isinstance(node, renpy.ast.Jump):
check_jump(node)
elif isinstance(node, renpy.ast.Call):
check_call(node)
elif isinstance(node, renpy.ast.While):
check_while(node)
elif isinstance(node, renpy.ast.If):
check_if(node)
elif isinstance(node, renpy.ast.UserStatement):
check_user(node)
report_node = None
check_styles()
for f in renpy.config.lint_hooks:
f()
print
print
print "Statistics:"
print
print "The game contains", say_count, "screens of dialogue."
print "These screens contain a total of", say_words, "words,"
if say_count > 0:
print "for an average of %.1f words per screen." % (1.0 * say_words / say_count)
print "The game contains", menu_count, "menus."
print
if renpy.config.developer:
print "Remember to set config.developer to False before releasing."
print
print "Lint is not a substitute for thorough testing. Remember to update Ren'Py"
print "before releasing. New releases fix bugs and improve compatibility."
|
MSEMJEJME/ReAlistair
|
renpy/lint.py
|
Python
|
gpl-2.0
| 13,359
| 0.007411
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes.
Batches provide the ability to execute multiple operations
in a single request to the Cloud Datastore API.
See
https://cloud.google.com/datastore/docs/concepts/entities#batch_operations
"""
from google.cloud.datastore import helpers
from google.cloud.datastore_v1.proto import datastore_pb2 as _datastore_pb2
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
Used to build up a bulk mutation.
For example, the following snippet of code will put the two ``save``
operations and the ``delete`` operation into the same mutation, and send
them to the server in a single API request::
>>> from google.cloud import datastore
>>> client = datastore.Client()
>>> batch = client.batch()
>>> batch.begin()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case
:meth:`commit` will be called automatically if its block exits without
raising an exception::
>>> with batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> with batch:
... do_some_work(batch)
... raise Exception() # rolls back
:type client: :class:`google.cloud.datastore.client.Client`
:param client: The client used to connect to datastore.
"""
_id = None # "protected" attribute, always None for non-transactions
_INITIAL = 0
"""Enum value for _INITIAL status of batch/transaction."""
_IN_PROGRESS = 1
"""Enum value for _IN_PROGRESS status of batch/transaction."""
_ABORTED = 2
"""Enum value for _ABORTED status of batch/transaction."""
_FINISHED = 3
"""Enum value for _FINISHED status of batch/transaction."""
def __init__(self, client):
self._client = client
self._mutations = []
self._partial_key_entities = []
self._status = self._INITIAL
def current(self):
"""Return the topmost batch / transaction, or None."""
return self._client.current_batch
@property
def project(self):
"""Getter for project in which the batch will run.
:rtype: :class:`str`
:returns: The project in which the batch will run.
"""
return self._client.project
@property
def namespace(self):
"""Getter for namespace in which the batch will run.
:rtype: :class:`str`
:returns: The namespace in which the batch will run.
"""
return self._client.namespace
def _add_partial_key_entity_pb(self):
"""Adds a new mutation for an entity with a partial key.
:rtype: :class:`.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.insert
def _add_complete_key_entity_pb(self):
"""Adds a new mutation for an entity with a completed key.
:rtype: :class:`.entity_pb2.Entity`
:returns: The newly created entity protobuf that will be
updated and sent with a commit.
"""
# We use ``upsert`` for entities with completed keys, rather than
# ``insert`` or ``update``, in order not to create race conditions
# based on prior existence / removal of the entity.
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.upsert
def _add_delete_key_pb(self):
"""Adds a new mutation for a key to be deleted.
:rtype: :class:`.entity_pb2.Key`
:returns: The newly created key protobuf that will be
deleted when sent with a commit.
"""
new_mutation = _datastore_pb2.Mutation()
self._mutations.append(new_mutation)
return new_mutation.delete
@property
def mutations(self):
"""Getter for the changes accumulated by this batch.
Every batch is committed with a single commit request containing all
the work to be done as mutations. Inside a batch, calling :meth:`put`
with an entity, or :meth:`delete` with a key, builds up the request by
adding a new mutation. This getter returns the protobuf that has been
built-up so far.
:rtype: iterable
:returns: The list of :class:`.datastore_pb2.Mutation`
protobufs to be sent in the commit request.
"""
return self._mutations
def put(self, entity):
"""Remember an entity's state to be saved during :meth:`commit`.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
When an entity has a partial key, calling :meth:`commit` sends it as
an ``insert`` mutation and the key is completed. On return,
the key for the ``entity`` passed in is updated to match the key ID
assigned by the server.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if entity has no key assigned, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to put()")
if entity.key is None:
raise ValueError("Entity must have a key")
if self.project != entity.key.project:
raise ValueError("Key must be from same project as batch")
if entity.key.is_partial:
entity_pb = self._add_partial_key_entity_pb()
self._partial_key_entities.append(entity)
else:
entity_pb = self._add_complete_key_entity_pb()
_assign_entity_to_pb(entity_pb, entity)
def delete(self, key):
"""Remember a key to be deleted during :meth:`commit`.
:type key: :class:`google.cloud.datastore.key.Key`
:param key: the key to be deleted.
:raises: :class:`~exceptions.ValueError` if the batch is not in
progress, if key is not complete, or if the key's
``project`` does not match ours.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to delete()")
if key.is_partial:
raise ValueError("Key must be complete")
if self.project != key.project:
raise ValueError("Key must be from same project as batch")
key_pb = key.to_protobuf()
self._add_delete_key_pb().CopyFrom(key_pb)
def begin(self):
"""Begins a batch.
This method is called automatically when entering a with
statement, however it can be called explicitly if you don't want
to use a context manager.
Overridden by :class:`google.cloud.datastore.transaction.Transaction`.
:raises: :class:`ValueError` if the batch has already begun.
"""
if self._status != self._INITIAL:
raise ValueError("Batch already started previously.")
self._status = self._IN_PROGRESS
def _commit(self):
"""Commits the batch.
This is called by :meth:`commit`.
"""
if self._id is None:
mode = _datastore_pb2.CommitRequest.NON_TRANSACTIONAL
else:
mode = _datastore_pb2.CommitRequest.TRANSACTIONAL
commit_response_pb = self._client._datastore_api.commit(
self.project, mode, self._mutations, transaction=self._id
)
_, updated_keys = _parse_commit_response(commit_response_pb)
# If the back-end returns without error, we are guaranteed that
# ``commit`` will return keys that match (length and
# order) directly ``_partial_key_entities``.
for new_key_pb, entity in zip(updated_keys, self._partial_key_entities):
new_id = new_key_pb.path[-1].id
entity.key = entity.key.completed_key(new_id)
def commit(self):
"""Commits the batch.
This is called automatically upon exiting a with statement,
however it can be called explicitly if you don't want to use a
context manager.
:raises: :class:`~exceptions.ValueError` if the batch is not
in progress.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to commit()")
try:
self._commit()
finally:
self._status = self._FINISHED
def rollback(self):
"""Rolls back the current batch.
Marks the batch as aborted (can't be used again).
Overridden by :class:`google.cloud.datastore.transaction.Transaction`.
:raises: :class:`~exceptions.ValueError` if the batch is not
in progress.
"""
if self._status != self._IN_PROGRESS:
raise ValueError("Batch must be in progress to rollback()")
self._status = self._ABORTED
def __enter__(self):
self.begin()
# NOTE: We make sure begin() succeeds before pushing onto the stack.
self._client._push_batch(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is None:
self.commit()
else:
self.rollback()
finally:
self._client._pop_batch()
def _assign_entity_to_pb(entity_pb, entity):
"""Copy ``entity`` into ``entity_pb``.
Helper method for ``Batch.put``.
:type entity_pb: :class:`.entity_pb2.Entity`
:param entity_pb: The entity owned by a mutation.
:type entity: :class:`google.cloud.datastore.entity.Entity`
:param entity: The entity being updated within the batch / transaction.
"""
bare_entity_pb = helpers.entity_to_protobuf(entity)
bare_entity_pb.key.CopyFrom(bare_entity_pb.key)
entity_pb.CopyFrom(bare_entity_pb)
def _parse_commit_response(commit_response_pb):
"""Extract response data from a commit response.
:type commit_response_pb: :class:`.datastore_pb2.CommitResponse`
:param commit_response_pb: The protobuf response from a commit request.
:rtype: tuple
:returns: The pair of the number of index updates and a list of
:class:`.entity_pb2.Key` for each incomplete key
that was completed in the commit.
"""
mut_results = commit_response_pb.mutation_results
index_updates = commit_response_pb.index_updates
completed_keys = [
mut_result.key for mut_result in mut_results if mut_result.HasField("key")
] # Message field (Key)
return index_updates, completed_keys
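# --- Hypothetical usage sketch (not part of the original module) ---
# Demonstrates the partial-key completion described in Batch.put(): the
# entity's key has no id before commit() and carries the server-assigned
# id afterwards. The kind name 'Task' is a made-up example.
#
# from google.cloud import datastore
# client = datastore.Client()
# entity = datastore.Entity(key=client.key('Task'))  # partial key
# with client.batch() as batch:
#     batch.put(entity)
# print(entity.key.id)  # populated from the commit response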
|
tseaver/google-cloud-python
|
datastore/google/cloud/datastore/batch.py
|
Python
|
apache-2.0
| 12,054
| 0.000166
|
# NEVER DO THIS IN SQL!
from Repository.Loader import Loader, LoaderException
from Domain import Grade, Student, Discipline
import sqlite3
class SQLLoader(Loader):
def __init__(self, repo):
self.repo = repo
self.conn = sqlite3.connect(self.repo.getStoragePath() + ".sqlite")
self.cursor = self.conn.cursor()
def save(self):
# serializable = {'students': [], 'disciplines': [], 'grades': []}
self.cursor.execute('''DROP TABLE IF EXISTS students;''')
self.cursor.execute('''DROP TABLE IF EXISTS disciplines;''')
self.cursor.execute('''DROP TABLE IF EXISTS grades;''')
# eww
self.cursor.execute('''CREATE TABLE students (id int, name text)''')
self.cursor.execute('''CREATE TABLE disciplines (id int, name text)''')
self.cursor.execute('''CREATE TABLE grades (did int, sid int, grade int)''')
serializable = {
'students': [(student.getId(), student.getName()) for student in self.repo.getStudents()],
'disciplines': [(discipline.getId(), discipline.getName()) for discipline in
self.repo.getDisciplines()],
'grades': [(grade.getDisciplineId(), grade.getStudentId(), grade.getGrade()) for grade in
self.repo.getGrades()]}
self.cursor.executemany('INSERT INTO students VALUES (?,?)', serializable['students'])
self.cursor.executemany('INSERT INTO disciplines VALUES (?,?)', serializable['disciplines'])
self.cursor.executemany('INSERT INTO grades VALUES (?,?,?)', serializable['grades'])
self.conn.commit()
def load(self):
try:
self.repo._createNewRepo()
for row in self.cursor.execute('SELECT * FROM students'):
self.repo.addStudent(Student.Student(row[0], row[1]), False)
for row in self.cursor.execute('SELECT * FROM disciplines'):
self.repo.addDiscipline(Discipline.Discipline(row[0], row[1]), False)
for row in self.cursor.execute('SELECT * FROM grades'):
self.repo.addGrade(Grade.Grade(row[0], row[1], row[2]), False)
return True
except Exception as ex:
print('[StudentRepository]', ex)
return False
# eval studentCatalogController._repo._converter(0)
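# --- Hypothetical usage sketch (not part of the original file) ---
# Round-trips a repository through the sqlite file, assuming `repo` is a
# Repository instance exposing the accessors used above (getStoragePath,
# getStudents, getDisciplines, getGrades, addStudent, ...).
#
# loader = SQLLoader(repo)
# loader.save()      # rewrites the students/disciplines/grades tables
# ok = loader.load() # repopulates the repo from those tables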
|
Zephyrrus/ubb
|
YEAR 1/SEM1/FP/LAB/l6-l9/Repository/SQLLoader.py
|
Python
|
mit
| 2,336
| 0.003853
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2015- Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# https://en.wikipedia.org/wiki/Miscellaneous_Symbols
# http://www.w3schools.com/charsets/ref_utf_symbols.asp
#
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.config import config
_ = glocale.translation.sgettext
# pylint: disable=superfluous-parens
# pylint: disable=anomalous-unicode-escape-in-string
class Symbols(object):
# genealogical symbols
SYMBOL_FEMALE = 0
SYMBOL_MALE = 1
SYMBOL_ASEXUAL_SEXLESS = 2 # Unknown
SYMBOL_LESBIAN = 3
SYMBOL_MALE_HOMOSEXUAL = 4
SYMBOL_HETEROSEXUAL = 5
SYMBOL_HERMAPHRODITE = 6
SYMBOL_TRANSGENDER = 7
SYMBOL_NEUTER = 8
SYMBOL_ILLEGITIM = 9
SYMBOL_BIRTH = 10
SYMBOL_BAPTISM = 11 # CHRISTENING
SYMBOL_ENGAGED = 12
SYMBOL_MARRIAGE = 13
SYMBOL_DIVORCE = 14
SYMBOL_UNMARRIED_PARTNERSHIP = 15
SYMBOL_BURIED = 16
SYMBOL_CREMATED = 17 # Funeral urn
SYMBOL_KILLED_IN_ACTION = 18
SYMBOL_EXTINCT = 19
# genealogical death symbols
DEATH_SYMBOL_NONE = 0
DEATH_SYMBOL_X = 1
DEATH_SYMBOL_SKULL = 2
DEATH_SYMBOL_ANKH = 3
DEATH_SYMBOL_ORTHODOX_CROSS = 4
DEATH_SYMBOL_CHI_RHO = 5
DEATH_SYMBOL_LORRAINE_CROSS = 6
DEATH_SYMBOL_JERUSALEM_CROSS = 7
DEATH_SYMBOL_STAR_CRESCENT = 8
DEATH_SYMBOL_WEST_SYRIAC_CROSS = 9
DEATH_SYMBOL_EAST_SYRIAC_CROSS = 10
DEATH_SYMBOL_HEAVY_GREEK_CROSS = 11
DEATH_SYMBOL_LATIN_CROSS = 12
DEATH_SYMBOL_SHADOWED_LATIN_CROSS = 13
DEATH_SYMBOL_MALTESE_CROSS = 14
DEATH_SYMBOL_STAR_OF_DAVID = 15
DEATH_SYMBOL_DEAD = 16
def __init__(self):
self.symbols = None
self.all_symbols = [
# Name UNICODE SUBSTITUTION
(_("Female"), '\u2640', ""),
(_("Male"), '\u2642', ""),
(_("Asexuality, sexless, genderless"), '\u26aa', ""),
(_("Lesbianism"), '\u26a2', ""),
(_("Male homosexuality"), '\u26a3', ""),
(_("Heterosexuality"), '\u26a4', ""),
(_("Transgender, hermaphrodite (in entomology)"), '\u26a5', ""),
(_("Transgender"), '\u26a6', ""),
(_("Neuter"), '\u26b2', ""),
(_("Illegitimate"), '\u229b', ""),
(_("Birth"), '\u002a', config.get('utf8.birth-symbol')),
(_("Baptism/Christening"), '\u007e',
config.get('utf8.baptism-symbol')),
(_("Engaged"), '\u26ac', config.get('utf8.engaged-symbol')),
(_("Marriage"), '\u26ad', config.get('utf8.marriage-symbol')),
(_("Divorce"), '\u26ae', config.get('utf8.divorce-symbol')),
(_("Unmarried partnership"), '\u26af',
config.get('utf8.partner-symbol')),
(_("Buried"), '\u26b0', config.get('utf8.buried-symbol')),
(_("Cremated/Funeral urn"), '\u26b1',
config.get('utf8.cremated-symbol')),
(_("Killed in action"), '\u2694', config.get('utf8.killed-symbol')),
(_("Extinct"), '\u2021', "")]
# The following is used in the global preferences in the display tab.
# Name UNICODE SUBSTITUTION
self.death_symbols = [(_("Nothing"), "", ""),
("x", "x", "x"),
(_("Skull and crossbones"), "\u2620",
config.get('utf8.dead-symbol')),
(_("Ankh"), "\u2625",
config.get('utf8.dead-symbol')),
(_("Orthodox cross"), "\u2626",
config.get('utf8.dead-symbol')),
(_("Chi rho"), "\u2627",
config.get('utf8.dead-symbol')),
(_("Cross of Lorraine"), "\u2628",
config.get('utf8.dead-symbol')),
(_("Cross of Jerusalem"), "\u2629",
config.get('utf8.dead-symbol')),
(_("Star and crescent"), "\u262a",
config.get('utf8.dead-symbol')),
(_("West Syriac cross"), "\u2670",
config.get('utf8.dead-symbol')),
(_("East Syriac cross"), "\u2671",
config.get('utf8.dead-symbol')),
(_("Heavy Greek cross"), "\u271a",
config.get('utf8.dead-symbol')),
(_("Latin cross"), "\u271d",
config.get('utf8.dead-symbol')),
(_("Shadowed White Latin cross"), "\u271e",
config.get('utf8.dead-symbol')),
(_("Maltese cross"), "\u2720",
config.get('utf8.dead-symbol')),
(_("Star of David"), "\u2721",
config.get('utf8.dead-symbol')),
(_("Dead"), ("Dead"), _("Dead"))]
#
# functions for general symbols
#
def get_symbol_for_html(self, symbol):
""" return the html string like '⚪' """
return '&#%d;' % ord(self.all_symbols[symbol][1])
def get_symbol_name(self, symbol):
"""
Return the name of the symbol.
"""
return self.all_symbols[symbol][0]
def get_symbol_for_string(self, symbol):
""" return the utf-8 character like '\u2670' """
return self.all_symbols[symbol][1]
def get_symbol_fallback(self, symbol):
"""
Return the replacement string.
        This is used if the utf-8 symbol is not present within a font.
"""
return self.all_symbols[symbol][2]
#
# functions for death symbols
#
def get_death_symbols(self):
"""
Return the list of death symbols.
This is used in the global preference to choose which symbol we'll use.
"""
return self.death_symbols
def get_death_symbol_name(self, symbol):
"""
Return the name of the symbol.
"""
return self.death_symbols[symbol][0]
def get_death_symbol_for_html(self, symbol):
"""
        return the html string like '&#9898;'.
"""
return '&#%d;' % ord(self.death_symbols[symbol][1])
def get_death_symbol_for_char(self, symbol):
"""
Return the utf-8 character for the symbol.
"""
return self.death_symbols[symbol][1]
def get_death_symbol_fallback(self, symbol):
"""
Return the string replacement for the symbol.
"""
return self.death_symbols[symbol][2]
#
# functions for all symbols
#
def get_how_many_symbols(self):
return len(self.death_symbols) + len(self.all_symbols) - 4
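# Illustrative usage sketch (not part of the original module): shows how the
# accessors above fit together. It assumes the Gramps config backend has been
# initialised so the utf8.* substitution options can be read; the values in the
# trailing comments follow from the tables defined above.
if __name__ == '__main__':
    symbols = Symbols()
    print(symbols.get_symbol_name(Symbols.SYMBOL_MARRIAGE))            # "Marriage"
    print(symbols.get_symbol_for_string(Symbols.SYMBOL_MARRIAGE))      # '\u26ad'
    print(symbols.get_symbol_for_html(Symbols.SYMBOL_MARRIAGE))        # '&#9901;'
    print(symbols.get_death_symbol_for_char(Symbols.DEATH_SYMBOL_LATIN_CROSS))
    print(symbols.get_death_symbol_fallback(Symbols.DEATH_SYMBOL_LATIN_CROSS))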
|
gramps-project/gramps
|
gramps/gen/utils/symbols.py
|
Python
|
gpl-2.0
| 7,831
| 0.001022
|
import sys
sys.path.insert(0, "../")
import unittest
from dip.typesystem import DNull, DBool, DInteger, DString, DList
from dip.compiler import BytecodeCompiler
from dip.interpreter import VirtualMachine
from dip.namespace import Namespace
class TestInterpreter(unittest.TestCase):
def _execute_simple(self, code, data):
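        # Compile the assembly-like source into a "main" function inside a fresh
        # global namespace, run it on the VM, and capture whatever value the
        # program returns via the result callback.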
result = [None]
def getresult(val):
result[0] = val
vm = VirtualMachine([], getresult)
globalns = Namespace("globals")
ctx = BytecodeCompiler("main", code, data, namespace=globalns)
globalns.set_func("main", ctx.mkfunc())
vm.setglobals(globalns)
vm.run(pass_argv=False)
return result[0]
def test_add(self):
result = self._execute_simple("""
ADD 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(32), # data0
DInteger.new_int(64), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 96)
def test_sub(self):
result = self._execute_simple("""
SUB 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(64), # data0
DInteger.new_int(32), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 32)
def test_mul(self):
result = self._execute_simple("""
MUL 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(64), # data0
DInteger.new_int(32), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 2048)
def test_div(self):
result = self._execute_simple("""
DIV 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(64), # data0
DInteger.new_int(2), # data1
DInteger(), # data2
])
self.assertEqual(result.int_py(), 32)
def test_jump(self):
result = self._execute_simple("""
JMP 2 # 0
RET 0 # 1
RET 1 # 2
""", [
DInteger.new_int(16), # data0
DInteger.new_int(32), # data1
])
self.assertEqual(result.int_py(), 32)
def test_len(self):
result = self._execute_simple("""
LEN 0 1 # 0
RET 1 # 1
""", [
DString.new_str("neat"), # data0
DInteger(), # data1
])
self.assertEqual(result.int_py(), 4)
def test_eq(self):
result = self._execute_simple("""
EQ 0 1 2 # 0
RET 2 # 1
""", [
DInteger.new_int(4), # data0
DInteger.new_int(5), # data1
DBool(), # data2
])
self.assertEqual(result.int_py(), False)
result = self._execute_simple("""
EQ 0 1 2 # 0
RET 2 # 1
""", [
DString.new_str("neat"), # data0
DString.new_str("neat"), # data1
DBool(), # data2
])
self.assertEqual(result.int_py(), True)
def test_branch(self):
result = self._execute_simple("""
EQ 0 1 2 # 0
BF 2 3 # 1
RET 0 # 2
LABEL :some_label # 3
RET 3 # 4
""", [
DInteger.new_int(4), # data0
DInteger.new_int(5), # data1
DBool(), # data2
DInteger.new_int(999), # data3
])
self.assertEqual(result.int_py(), 999)
def test_lists(self):
result = self._execute_simple("""
LIST_NEW 0
LIST_ADD 0 1 # 0 data0.append(data1)
LIST_ADD 0 1 # 1 data0.append(data1)
LIST_ADD 0 2 # 2 data0.append(data2)
LEN 0 3 # 3 data3 = len(data0)
EQ 3 5 6 # 4 data6 = (data3 == data5)
LIST_REM 0 4 # 5 data0.remove(data4 (represents an index))
LEN 0 3 # 6 data3 = len(data0)
NEQ 3 5 7 # 7 data7 = (data3 != data5)
EQ 6 7 8 # 8 data8 = (data6 == data7)
RET 8 # 9 return data8
""", [
DList(), # data0, list
DInteger.new_int(5), # data1, fake value to add to the list
DString.new_str("hi"), # data2, fake value to add to the list
DInteger(), # data3, list length
DInteger.new_int(2), # data4, list index
DInteger.new_int(3), # data5, expected list length
DBool(), # data6, comp1
DBool(), # data7, comp2
DBool(), # data8, output
])
self.assertEqual(result.int_py(), True)
if __name__ == '__main__':
unittest.main()
|
juddc/Dipper
|
dip/tests/test_interpreter.py
|
Python
|
mit
| 5,801
| 0.001896
|
from __future__ import absolute_import
import re
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from uuid import uuid1
from sentry import options
from sentry.models import AuditLogEntryEvent, Project, Team
from sentry.web.forms.fields import (
CustomTypedChoiceField,
RangeField,
OriginsField,
)
from sentry.web.frontend.base import ProjectView
BLANK_CHOICE = [("", "")]
class EditProjectForm(forms.ModelForm):
name = forms.CharField(
label=_('Project Name'),
max_length=200,
widget=forms.TextInput(attrs={'placeholder': _('Production')})
)
slug = forms.SlugField(
label=_('Short name'),
help_text=_('A unique ID used to identify this project.'),
)
team = CustomTypedChoiceField(choices=(), coerce=int, required=False)
origins = OriginsField(
label=_('Allowed Domains'),
required=False,
help_text=_('Separate multiple entries with a newline.')
)
token = forms.CharField(
label=_('Security token'),
help_text=_(
'Outbound requests matching Allowed Domains will have the header "{token_header}: {token}" appended.'
),
required=True,
)
token_header = forms.CharField(
label=_('Security token header'),
help_text=_(
'Outbound requests matching Allowed Domains will have the header "{token_header}: {token}" appended.'
),
widget=forms.TextInput(attrs={
'placeholder': _('X-Sentry-Token'),
}),
required=False,
)
verify_ssl = forms.BooleanField(
label=_('Verify TLS/SSL'),
help_text=_('Outbound requests will verify TLS (sometimes known as SSL) connections.'),
required=False,
)
resolve_age = RangeField(
label=_('Auto resolve'),
required=False,
min_value=0,
max_value=720,
step_value=1,
help_text=_(
'Automatically resolve an issue if it hasn\'t been seen for this amount of time.'
)
)
scrub_data = forms.BooleanField(
label=_('Data Scrubber'), help_text=_('Enable server-side data scrubbing.'), required=False
)
scrub_defaults = forms.BooleanField(
label=_('Use Default Scrubbers'),
help_text=_(
'Apply default scrubbers to prevent things like passwords and credit cards from being stored.'
),
required=False
)
sensitive_fields = forms.CharField(
label=_('Additional sensitive fields'),
help_text=_(
'Additional field names to match against when scrubbing data. Separate multiple entries with a newline.'
),
widget=forms.Textarea(
attrs={
'placeholder': mark_safe(_('e.g. email')),
'class': 'span8',
'rows': '3',
}
),
required=False,
)
safe_fields = forms.CharField(
label=_('Safe fields'),
help_text=_(
'Field names which data scrubbers should ignore. '
'Separate multiple entries with a newline.'
),
widget=forms.Textarea(
attrs={
'placeholder': mark_safe(_('e.g. email')),
'class': 'span8',
'rows': '3',
}
),
required=False,
)
scrub_ip_address = forms.BooleanField(
label=_('Don\'t store IP Addresses'),
help_text=_('Prevent IP addresses from being stored for new events.'),
required=False
)
# JavaScript options
scrape_javascript = forms.BooleanField(
label=_('Enable JavaScript source fetching'),
help_text=_('Allow Sentry to scrape missing JavaScript source context when possible.'),
required=False,
)
# Options that are overridden by Organization level settings
org_overrides = ('scrub_data', 'scrub_defaults', 'scrub_ip_address')
default_environment = forms.CharField(
label=_('Default Environment'),
help_text=_('The default selected environment when viewing issues.'),
widget=forms.TextInput(attrs={'placeholder': _('e.g. production')}),
required=False,
)
mail_subject_prefix = forms.CharField(
label=_('Subject Prefix'),
required=False,
help_text=_('Choose a custom prefix for emails from this project.')
)
class Meta:
fields = ('name', 'team', 'slug')
model = Project
def __init__(self, request, organization, team_list, data, instance, *args, **kwargs):
# First, we need to check for the value overrides from the Organization options
# We need to do this before `initial` gets passed into the Form.
disabled = []
if 'initial' in kwargs:
for opt in self.org_overrides:
value = bool(organization.get_option('sentry:require_%s' % (opt, ), False))
if value:
disabled.append(opt)
kwargs['initial'][opt] = value
super(EditProjectForm, self).__init__(data=data, instance=instance, *args, **kwargs)
self.organization = organization
self.team_list = team_list
self.fields['team'].choices = self.get_team_choices(team_list, instance.team)
self.fields['team'].widget.choices = self.fields['team'].choices
# After the Form is initialized, we now need to disable the fields that have been
# overridden from Organization options.
for opt in disabled:
self.fields[opt].widget.attrs['disabled'] = 'disabled'
def get_team_label(self, team):
return '%s (%s)' % (team.name, team.slug)
def get_team_choices(self, team_list, default=None):
sorted_team_list = sorted(team_list, key=lambda x: x.name)
choices = []
for team in sorted_team_list:
# TODO: optimize queries
choices.append((team.id, self.get_team_label(team)))
if default is None:
choices.insert(0, (-1, mark_safe('–' * 8)))
elif default not in sorted_team_list:
choices.insert(0, (default.id, self.get_team_label(default)))
return choices
def clean_sensitive_fields(self):
value = self.cleaned_data.get('sensitive_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_safe_fields(self):
value = self.cleaned_data.get('safe_fields')
if not value:
return
return filter(bool, (v.lower().strip() for v in value.split('\n')))
def clean_team(self):
value = self.cleaned_data.get('team')
if not value:
return
# TODO: why is this not already an int?
value = int(value)
if value == -1:
return
if self.instance.team and value == self.instance.team.id:
return self.instance.team
for team in self.team_list:
if value == team.id:
return team
raise forms.ValidationError('Unable to find chosen team')
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if not slug:
return
other = Project.objects.filter(
slug=slug, organization=self.organization
).exclude(id=self.instance.id).first()
if other is not None:
raise forms.ValidationError(
'Another project (%s) is already '
'using that slug' % other.name
)
return slug
def clean_token(self):
token = self.cleaned_data.get('token')
if not token:
return
token_re = r'^[-a-zA-Z0-9+/= ]{1,255}$'
if not re.match(token_re, token):
raise forms.ValidationError('Invalid security token, must be: %s' % token_re)
return token
def clean_token_header(self):
token_header = self.cleaned_data.get('token_header')
if not token_header:
return
header_re = r'^[a-zA-Z0-9-]{1,20}$'
if not re.match(header_re, token_header):
raise forms.ValidationError('Invalid header value, must be: %s' % header_re)
return token_header
class ProjectSettingsView(ProjectView):
required_scope = 'project:write'
def get_form(self, request, project):
organization = project.organization
team_list = [
t for t in Team.objects.get_for_user(
organization=organization,
user=request.user,
) if request.access.has_team_scope(t, self.required_scope)
]
# TODO(dcramer): this update should happen within a lock
security_token = project.get_option('sentry:token', None)
if security_token is None:
security_token = uuid1().hex
project.update_option('sentry:token', security_token)
return EditProjectForm(
request,
organization,
team_list,
request.POST or None,
instance=project,
initial={
'origins':
'\n'.join(project.get_option('sentry:origins', ['*'])),
'token':
security_token,
'token_header':
project.get_option('sentry:token_header'),
'verify_ssl':
bool(project.get_option('sentry:verify_ssl', False)),
'resolve_age':
int(project.get_option('sentry:resolve_age', 0)),
'scrub_data':
bool(project.get_option('sentry:scrub_data', True)),
'scrub_defaults':
bool(project.get_option('sentry:scrub_defaults', True)),
'sensitive_fields':
'\n'.join(project.get_option('sentry:sensitive_fields', None) or []),
'safe_fields':
'\n'.join(project.get_option('sentry:safe_fields', None) or []),
'scrub_ip_address':
bool(project.get_option('sentry:scrub_ip_address', False)),
'scrape_javascript':
bool(project.get_option('sentry:scrape_javascript', True)),
'default_environment':
project.get_option('sentry:default_environment'),
'mail_subject_prefix':
project.get_option('mail:subject_prefix', options.get('mail.subject-prefix')),
},
)
def handle(self, request, organization, team, project):
form = self.get_form(request, project)
if form.is_valid():
project = form.save()
for opt in (
'origins', 'token', 'token_header', 'verify_ssl', 'resolve_age', 'scrub_data',
'scrub_defaults', 'sensitive_fields', 'safe_fields', 'scrub_ip_address',
'scrape_javascript', 'default_environment', 'mail_subject_prefix',
):
opt_key = 'sentry:{}'.format(opt)
# Value can't be overridden if set on the org level
if opt in form.org_overrides and organization.get_option(opt_key, False):
continue
if opt == 'mail_subject_prefix':
key = 'mail:subject_prefix'
else:
key = 'sentry:%s' % (opt, )
value = form.cleaned_data.get(opt)
if value is None:
project.delete_option(key)
else:
project.update_option(key, value)
self.create_audit_entry(
request,
organization=organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_EDIT,
data=project.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS, _('Changes to your project were saved.')
)
redirect = reverse(
'sentry-manage-project', args=[project.organization.slug, project.slug]
)
return HttpResponseRedirect(redirect)
context = {
'form': form,
'page': 'details',
}
return self.respond('sentry/projects/manage.html', context)
|
jean/sentry
|
src/sentry/web/frontend/project_settings.py
|
Python
|
bsd-3-clause
| 12,517
| 0.001997
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ReadTensorboardBlobData
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
from google.cloud import aiplatform_v1
def sample_read_tensorboard_blob_data():
# Create a client
client = aiplatform_v1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ReadTensorboardBlobDataRequest(
time_series="time_series_value",
)
# Make the request
stream = client.read_tensorboard_blob_data(request=request)
# Handle the response
for response in stream:
print(response)
# [END aiplatform_v1_generated_TensorboardService_ReadTensorboardBlobData_sync]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py
|
Python
|
apache-2.0
| 1,581
| 0.001265
|
import os
import re
from . import core
@core.rule
def executable(
name, sources=None, include=None, define=None, flags=None, links=None,
compiler=None, warnings_are_errors=False, scan=True, debug=True,
objects=None, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
include = list(include) if include else []
define = dict(define) if define else {}
flags = list(flags) if flags else []
objects = list(objects) if objects else []
linkflags = list(linkflags) if linkflags else []
static = []
shared = []
if links:
for link in links:
if isinstance(link, str):
lib = find_static_library(link)
if lib is None:
raise ValueError('lib could not be found: ' + link)
static.append(lib)
elif getattr(link, 'type') == 'cpp.static_library':
include.extend(link.headers)
static.append(core.resolve(link.output))
elif getattr(link, 'type') == 'cpp.shared_library':
include.extend(link.headers)
if toolchain is GNU:
shared.append(core.resolve(link.output))
else:
shared.append(core.resolve(link.msvc_lib))
else:
raise TypeError('invalid entry in links: "{}"'.format(link))
if toolchain is MSVC:
name += '.exe'
name = core.build(name)
for source in sources:
obj = object(
sources=[source],
include=include,
define=define,
flags=flags,
compiler=compiler,
error_warnings=warnings_are_errors,
scan=scan,
debug=debug
)
objects.append(core.resolve(obj.output))
yield core.publish(
inputs=objects + static + shared,
message='Link {}'.format(name),
outputs=[name],
result={
'type': 'cpp.executable'
},
check=linkflags
)
if toolchain is GNU:
command = [compiler, '-o', name]
command.extend(objects)
command.extend(static)
for s in shared:
command.append(s)
command.append('-Wl,-rpath,' + os.path.dirname(core.absolute(s)))
command.append('-lstdc++')
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
command = [compiler, '/Fe' + name, '/nologo']
command.extend(objects + shared + static)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
@core.rule
def static_library(
name=None, sources=None, include=None, define=None, flags=None,
headers=None, compiler=None, warnings_are_errors=False, scan=True,
debug=True, objects=None, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if headers is None:
headers = []
if sources is None:
sources = []
if objects is None:
objects = []
linkflags = list(linkflags) if linkflags else []
for source in sources:
obj = object(
sources=[source],
compiler=compiler,
scan=scan,
include=include,
define=define,
flags=flags,
error_warnings=warnings_are_errors,
debug=debug
)
objects.append(obj.output)
if name is None:
name = core.intermediate(core.checksum(
sources, compiler, toolchain, include, define, headers))
else:
name = core.build(name)
if toolchain is MSVC:
name += '.lib'
elif toolchain is GNU:
name += '.a'
yield core.publish(
inputs=objects,
message='Static {}'.format(name),
outputs=[name],
result={
'type': 'cpp.static_library',
'headers': core.absolute(core.resolve(headers))
},
check=linkflags
)
if toolchain is GNU:
archiver = core.which('ar')
command = [archiver, 'rs', name]
command.extend(objects)
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
archiver = os.path.join(os.path.dirname(compiler), 'lib.exe')
command = [archiver, '/OUT:' + name]
command.extend(objects)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
@core.rule
def shared_library(
name, sources, include=None, define=None, flags=None, headers=None,
compiler=None, warnings_are_errors=False, scan=True, msvc_lib=False,
debug=True, linkflags=None
):
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if headers is None:
headers = []
linkflags = list(linkflags) if linkflags else []
if flags is None:
flags = []
if toolchain is GNU:
flags.append('-fPIC')
if define is None:
define = {}
define['DLL_EXPORT'] = 1
objects = []
for source in sources:
obj = object(
sources=[source],
compiler=compiler,
scan=scan,
include=include,
define=define,
flags=flags,
error_warnings=warnings_are_errors,
debug=debug
)
objects.append(obj.output)
if toolchain is MSVC:
lib = name + '.lib'
if msvc_lib:
lib = core.build(lib)
else:
lib = core.intermediate(lib)
name = core.build(name + '.dll')
else:
lib = None
head, tail = os.path.split(name)
name = core.build(os.path.join(head, 'lib' + tail + '.so'))
yield core.publish(
inputs=objects,
message='Shared {}'.format(name),
outputs=[name, lib] if lib else [name],
result={
'type': 'cpp.shared_library',
'msvc_lib': core.absolute(lib),
'headers': core.absolute(core.resolve(headers)),
'output': core.absolute(name)
},
check=linkflags
)
if toolchain is GNU:
command = [compiler, '-shared', '-o', name]
command.extend(objects)
command.append('-Wl,-soname,' + os.path.basename(name))
command.extend(linkflags)
core.call(command)
elif toolchain is MSVC:
command = [compiler, '/Fe' + name, '/nologo', '/LD']
command.extend(objects)
command.extend(linkflags)
core.call(command, env=_msvc_get_cl_env(compiler))
base = os.path.splitext(name)[0]
if not msvc_lib:
origin = base + '.lib'
if os.path.isfile(lib):
os.remove(lib)
os.rename(origin, lib)
os.remove(base + '.exp')
else:
raise NotImplementedError
@core.rule
def object(
name=None, sources=None, include=None, define=None, flags=None,
compiler=None, error_warnings=False, scan=True, debug=True, depend=None
):
if isinstance(sources, str):
raise TypeError('sources must not be a string - try to use a list')
if not sources:
raise ValueError('sources must not be empty')
sources = core.resolve(sources)
include = list(include) if include else []
define = dict(define) if define else {}
flags = list(flags) if flags else []
depend = list(depend) if depend else []
if compiler is None:
compiler, toolchain = _get_default_compiler()
else:
toolchain = _get_toolchain(compiler)
if toolchain is None:
raise ValueError('toolchain could not be detected')
if name is None:
name = core.intermediate(core.checksum(
core.absolute(sources), compiler)[:16])
else:
name = core.build(name)
if toolchain is GNU:
name += '.o'
elif toolchain is MSVC:
name += '.obj'
yield core.publish(
inputs=sources + [compiler] + depend,
message='Compile ' + ', '.join(sources),
outputs=[name],
check=[include, define, flags, error_warnings, scan, debug],
result={
'type': 'cpp.object',
'include': include,
'define': define,
'flags': flags,
'compiler': compiler,
}
)
for identifier, value in define.items():
if isinstance(value, str):
define[identifier] = '"{}"'.format(value)
elif value is True:
define[identifier] = 'true'
elif value is False:
define[identifier] = 'false'
elif isinstance(value, (int, float)):
pass
else:
raise TypeError('unsupported define type: {}'.format(type(value)))
if toolchain is GNU:
command = [compiler, '-c', '-o', name, '-x', 'c++', '-std=c++11']
command.extend(sources)
for directory in include:
command.extend(['-I', directory])
# Enable most warnings. Option to change this?
command.append('-Wall')
if error_warnings:
command.append('-Werror')
if debug:
command.append('-g')
else:
command.append('-O3')
command.append('-DNDEBUG')
for identifier, value in define.items():
command.append('-D{}={}'.format(identifier, value))
if scan:
depfile = core.temporary(core.random('.d'))
command.extend(['-MD', '-MF', depfile])
else:
depfile = None
if _gnu_supports_colors(compiler):
command.append('-fdiagnostics-color')
command.extend(flags)
output = core.call(command)
if scan:
# TODO: Good parsing.
with open(depfile) as file:
content = file.read()
used = {
os.path.abspath(x) for x in
content[content.find(':')+1:].replace('\\\n', '\n').split()
}
# TODO: No difference!!
used.difference_update(core.absolute(sources))
used.difference_update(core.absolute(depend))
else:
used = None
yield core.deposit(inputs=used, warnings=output or None)
elif toolchain is MSVC:
command = [compiler, '/c', '/Fo' + name, '/nologo']
command.extend(sources)
for directory in include:
command.extend(['/I' + directory])
if scan:
command.append('/showIncludes')
for identifier, value in define.items():
command.append('/D{}={}'.format(identifier, value))
# TODO: Option to set c++ standard.
# command.append('/std:' + standard)
        # TODO: Figure out debug / release
# === DEBUG ===
# command.append('/ZI') Enable nice debug mode?
# command.append('/Od') Disable optimizations for debug
# command.append('/Gm') Enable minimal rebuild?
# command.append('/RTC1') Run-time error checks
# /MDd
# === RELEASE ===
# command.append('/Ox') Full Optimization or /Oi?
# /Zi Debug information
# /GL Breaks object-linking? Whole prog optimization
# command.append('/O2') Optimize for speed
command.append('/W4') # Enable most warnings.
if error_warnings:
command.append('/WX') # All warnings as errors.
command.append('/EHsc') # Specify exception handling model
command.append('/sdl') # Additional security warnings
command.append('/TP') # Assume C++ sources
command.extend(flags)
try:
output = core.call(command, env=_msvc_get_cl_env(compiler))
except core.CallError as exc:
exc.output = _msvc_strip_includes(exc.output)
raise
if scan:
used = _msvc_extract_includes(output)
else:
used = None
yield core.deposit(
inputs=used,
warnings=_msvc_strip_includes(output).strip() or None
)
def find_static_library(name):
if core.windows:
return _find('{}.lib'.format(name))
else:
return _find('lib{}.a'.format(name))
def find_shared_library(name):
if core.windows:
return _find('{}.dll'.format(name))
else:
return _find('lib{}.so'.format(name))
def get_default_toolchain():
return _get_default_compiler()[1]
GNU = 'GNU'
MSVC = 'MSVC'
def _find(name):
if core.windows:
env = _msvc_get_cl_env(_get_default_compiler()[0])
for directory in env['LIB'].split(os.pathsep):
path = os.path.join(directory, name)
if os.path.isfile(path):
return path
else:
architectures = ['x86_64-linux-gnu', 'i386-linux-gnu']
env_path = os.environ.get('PATH', '').split(os.pathsep)
for directory in env_path:
if directory.endswith('bin') or directory.endswith('sbin'):
directory = os.path.normpath(os.path.dirname(directory))
for arch in architectures:
path = os.path.join(directory, 'lib', arch, name)
if os.path.isfile(path):
return path
path = os.path.join(directory, arch, name)
if os.path.isfile(path):
return path
path = os.path.join(directory, 'lib', name)
if os.path.isfile(path):
return path
path = os.path.join(directory, name)
if os.path.isfile(path):
return path
@core.cache
def _get_default_compiler():
compiler = os.environ.get('CXX')
if compiler is not None:
compiler = core.which(compiler)
if compiler is None:
raise FileNotFoundError('CXX compiler does not exist')
toolchain = _get_toolchain(compiler)
elif core.windows:
compiler = _msvc_find_cl()
toolchain = _get_toolchain(compiler)
if compiler is None:
compiler = core.which('c++') or core.which('g++')
toolchain = _get_toolchain(compiler)
if compiler is None:
        raise ValueError('compiler could not be determined')
elif toolchain is None:
raise ValueError('toolchain could not be detected')
core.debug('Detected C++ compiler: {} [{}]'.format(compiler, toolchain))
return compiler, toolchain
@core.cache
def _get_toolchain(compiler):
if compiler is None:
return None
if 'g++' in compiler or 'c++' in compiler:
return GNU
if 'clang' in compiler:
return GNU
if 'cl.exe' in compiler:
return MSVC
@core.cache
def _msvc_get_cl_env(cl):
bat = os.path.normpath(
os.path.join(os.path.dirname(cl), '../vcvarsall.bat'))
if os.path.isfile(bat):
return _msvc_extract_vcvars(bat)
bat = os.path.normpath(os.path.join(
os.path.dirname(cl),
'../../../../../../Auxiliary/Build/vcvarsall.bat')
)
if os.path.isfile(bat):
return _msvc_extract_vcvars(bat)
raise ValueError('could not extract env')
@core.cache
def _msvc_extract_vcvars(vcvars):
core.debug('Extracting environment of {}'.format(vcvars))
helper = core.temporary(core.random('.bat'))
with open(helper, 'w') as stream:
stream.write('\n'.join([
'@call "{vcvars}" {mode}',
'@echo PATH=%PATH%',
'@echo INCLUDE=%INCLUDE%',
'@echo LIB=%LIB%;%LIBPATH%'
]).format(vcvars=vcvars, mode='x86'))
cmd = core.which('cmd.exe')
output = core.call([cmd, '/C', helper])
env = os.environ.copy()
steps = 0
for line in output.strip().splitlines():
if any(line.startswith(var) for var in ('PATH=', 'INCLUDE=', 'LIB=')):
key, value = line.split('=', maxsplit=1)
env[key] = value[:-1]
steps += 1
if steps != 3:
        raise RuntimeError('msvc auto configuration failed: {}'.format(output))
return env
def _msvc_find_cl():
path = os.path.join(
os.environ.get('ProgramFiles(x86)', r'C:\Program Files (x86)'),
r'Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build',
'vcvarsall.bat'
)
if os.path.isfile(path):
env = _msvc_extract_vcvars(path)
cl = core.which('cl.exe', env=env)
if cl is None:
raise FileNotFoundError('expected to find cl')
return cl
for version in [140, 120, 110, 100, 90, 80, 71, 70]:
tools = os.environ.get('VS{}COMNTOOLS'.format(version))
if not tools:
continue
cl = os.path.normpath(os.path.join(tools, '../../VC/bin/cl.exe'))
if os.path.isfile(cl):
return cl
def _msvc_strip_includes(output):
regex = re.compile(r'^[^:]+: [^:]+: +(.*)$')
result = []
for line in output.splitlines():
match = regex.match(line)
if match:
path = match.group(1)
if not os.path.isfile(path):
result.append(line)
return '\n'.join(result) + '\n'
def _msvc_extract_includes(output):
regex = re.compile(r'^[^:]+: [^:]+: +(.*)$')
used = []
for line in output.splitlines():
match = regex.match(line)
if match:
path = match.group(1)
if os.path.isfile(path):
used.append(path)
return used
@core.cache
def _gnu_supports_colors(compiler):
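    # Probe support by invoking the compiler with only -fdiagnostics-color: the
    # call fails either way (there are no input files), but the error text shows
    # whether the flag itself was rejected.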
try:
core.call([compiler, '-fdiagnostics-color'])
except core.CallError as exc:
return ('unknown argument' not in exc.output and
'unrecognized command line option' not in exc.output)
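# Illustrative usage sketch (not part of this module): how the rules above might
# be invoked from a cook build script. The `from cook import cpp` import path and
# the build-file convention are assumptions; the keyword arguments simply mirror
# the rule signatures defined in this file, and the generator-based @core.rule
# functions are expected to be driven by the cook engine rather than called
# directly, so the snippet is left commented out.
#
#   from cook import cpp
#
#   lib = cpp.static_library(
#       name='mylib',
#       sources=['src/lib.cpp'],
#       headers=['include/lib.h'],
#       include=['include'],
#   )
#
#   cpp.executable(
#       name='app',
#       sources=['src/main.cpp'],
#       include=['include'],
#       links=[lib],
#   )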
|
jachris/cook
|
cook/cpp.py
|
Python
|
mit
| 18,097
| 0
|
from scrapy.contrib.exporter import CsvItemExporter
from scrapy.conf import settings
class SlybotCSVItemExporter(CsvItemExporter):
def __init__(self, *args, **kwargs):
kwargs['fields_to_export'] = settings.getlist('CSV_EXPORT_FIELDS') or None
super(SlybotCSVItemExporter, self).__init__(*args, **kwargs)
|
lodow/portia-proxy
|
slybot/slybot/exporter.py
|
Python
|
bsd-3-clause
| 325
| 0.006154
|
#!/usr/bin/env python2.7
import sys
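# Convert a SAM alignment file to FASTQ-like records: for each alignment line,
# column 1 (QNAME) supplies the read name, column 10 (SEQ) the sequence and
# column 11 (QUAL) the quality string -- cut[0], cut[9] and cut[10] below.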
for line in open(sys.argv[1]):
cut=line.split('\t')
if len(cut)<11: continue
print ">"+cut[0]
print cut[9]
print "+"
print cut[10]
|
ursky/metaWRAP
|
bin/metawrap-scripts/sam_to_fastq.py
|
Python
|
mit
| 173
| 0.052023
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Stardust Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import StardustTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(StardustTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = False
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
|
ctwiz/stardust
|
qa/rpc-tests/mempool_spendcoinbase.py
|
Python
|
mit
| 2,474
| 0.005659
|
import unittest
from unittest.mock import patch
from app.main.service import GitHubUserService
@patch("app.main.service.github")
class TestGitHubUserService(unittest.TestCase):
def setUp(self):
self.test_user = "test"
self.retrieved_repos_return = [
{
"fork": False,
"name": "test_non_fork",
"pull_url": "http://localhost/non_fork/pulls",
"url": "https://localhost/non_fork",
"full_name": self.test_user + "/test_non_fork",
"html_url": "https://localhost"
},
{
"fork": True,
"name": "test_fork",
"full_name": self.test_user + "/test_fork",
"url": "https://localhost/child",
"html_url": "https://localhost",
"parent": {
"fork": False,
"name": "parent",
"url": "http://parent",
"full_name": self.test_user + "1/test_parent",
"pull_url": "https://localhost/parent/pulls",
"html_url": "https://localhost/parent"
}
}
]
def test_search_for_users_error(self, github_client):
message = "too many"
github_client.search_for_user.return_value = {"error": message}
assert GitHubUserService.search_for_user("nobody") == message
def test_search_for_users_success(self, github_client):
github_client_return = [{
"avatar_url": "test",
"repos_url": "http://localhost",
"html_url": "https://localhost",
"login": "nobody"
}]
github_client.search_for_user.return_value = github_client_return
found_users = GitHubUserService.search_for_users("nobody")
self.assertEqual(found_users[0].avatar_url, github_client_return[0]["avatar_url"])
self.assertEqual(found_users[0].repos_url, github_client_return[0]["repos_url"])
self.assertEqual(found_users[0].url, github_client_return[0]["html_url"])
self.assertEqual(found_users[0].login, github_client_return[0]["login"])
def test_retrieve_repos_if_fork_with_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
pulls = [
{
"html_url": "https://localhost/parent/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
if "parent" in url:
return pulls
else:
pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
return pulls
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
self.assertEqual(2, len(actual_repos))
for repo in actual_repos:
if repo.is_fork:
self.assertTrue("parent" in
repo.pull_requests[0].url,
"The parent pulls are not in the repo: {}"
.format(repo.name))
def test_retrieve_repos_if_fork_without_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
pulls = [
{
"html_url": "https://localhost/parent/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
if "parent" in url:
return []
else:
pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
return pulls
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
for repo in actual_repos:
if repo.is_fork:
self.assertIsNone(repo.pull_requests,
"The parent pulls are not in the repo: {}"
.format(repo.name))
def test_retrieve_repos_if_source_with_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
pulls = [
{
"html_url": "https://localhost/non_fork/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
return pulls
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
self.assertEqual(2, len(actual_repos))
for repo in actual_repos:
if not repo.is_fork:
self.assertTrue("non_fork" in
repo.pull_requests[0].url,
"The non_fork pulls are not in the repo: {}"
.format(repo.name))
def test_retrieve_repos_if_source_without_pr(self, github_client):
def local_mock_retrieve_pulls(url, state):
return []
# mocks
github_client.retrieve_repos.return_value = self.retrieved_repos_return
github_client.retrieve_repo.side_effect = self.mock_retrieve_repo
github_client.retrieve_pulls.side_effect = local_mock_retrieve_pulls
actual_repos = GitHubUserService.retrieve_repos(self.test_user)
self.assertEqual(2, len(actual_repos))
for repo in actual_repos:
if not repo.is_fork:
self.assertIsNone(repo.pull_requests,
"The non_fork pulls are not in the repo: {}"
.format(repo.name))
# -----------------helper mock functions--------------------
def mock_retrieve_repo(self, url):
if "non_fork" in url:
return self.retrieved_repos_return[0]
elif "parent" in url:
return self.retrieved_repos_return[1]["parent"]
else:
return self.retrieved_repos_return[1]
def mock_retrieve_pulls(self, url, state):
pulls = [
{
"html_url": "https://localhost/parent/pulls",
"title": "test title",
"user": {
"login": self.test_user
}
}
]
if "parent" in url:
return pulls
else:
pulls[0]["html_url"] = self.retrieved_repos_return[0]["html_url"]
return pulls
if __name__ == '__main__':
unittest.main()
|
darylmathison/github-user-queries
|
tests/main/test_service.py
|
Python
|
gpl-3.0
| 7,359
| 0.001495
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="histogram.marker.pattern", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram/marker/pattern/_bgcolorsrc.py
|
Python
|
mit
| 433
| 0.002309
|
# -*- coding: utf-8 -*-
#
# sympa documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 25 18:11:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sympa'
copyright = u'%s, Direction Informatique' % date.today().strftime("%Y")
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sympadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sympa.tex', u'sympa Documentation',
u'Direction Informatique', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sympa', u'sympa Documentation',
[u'Direction Informatique'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sympa', u'sympa Documentation',
u'Direction Informatique', 'sympa', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
unistra/django-sympa
|
docs/conf.py
|
Python
|
gpl-2.0
| 8,528
| 0.00598
|
from distutils.core import setup
setup(
# Application name:
name="streaker",
# Version number (initial):
version="0.0.1",
# Application author details:
author="Aldi Alimucaj",
author_email="aldi.alimucaj@gmail.com",
# Packages
packages=["streaker"],
scripts=['bin/streaker'],
# Include additional files into the package
include_package_data=True,
# Details
url="http://pypi.python.org/pypi/Streaker_v001/",
#
license="MIT",
description="GitHub streak manipulator",
# long_description=open("README.txt").read(),
# Dependent packages (distributions)
install_requires=[
# "",
],
)
|
aldialimucaj/Streaker
|
setup.py
|
Python
|
mit
| 680
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# document_csv module for OpenERP, Import structure in CSV
# Copyright (C) 2011 SYLEAM (<http://www.syleam.fr/>)
# Christophe CHAUVET <christophe.chauvet@syleam.fr>
# Copyright (C) 2011 Camptocamp (http://www.camptocamp.com)
# Guewen Baconnier
#
# This file is a part of document_csv
#
# document_csv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# document_csv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class LaunchImport(osv.osv_memory):
_name = 'wizard.launch.import.csv'
_description = 'Interface to launch CSV import'
_rec_name = 'import_list'
def _import_list(self, cr, uid, context=None):
implist_obj = self.pool.get('document.import.list')
doc_ids = implist_obj.search(cr, uid, [('disable', '=', False)])
if doc_ids:
return [(x.id, x.name) for x in implist_obj.browse(cr, uid, doc_ids, context=context)]
return []
_columns = {
'import_list': fields.selection(_import_list, 'List', help='List of available import structure', required=True),
'import_file': fields.binary('Filename', required=True),
'lang_id': fields.many2one('res.lang', 'Language', help='Translation to update.'),
'email_result': fields.char('Email', size=256, help='Email to send notification when import is finished'),
}
def default_get(self, cr, uid, fields_list, context=None):
"""
Retrieve email for this user
"""
if context is None:
context = {}
res = super(LaunchImport, self).default_get(cr, uid, fields_list, context=context)
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
res['email_result'] = user.user_email or ''
if context.get('lang'):
res['lang_id'] = self.pool.get('res.lang').search(cr, uid, [('code', '=', context['lang'])], context=context)
return res
def launch_import(self, cr, uid, ids, context=None):
"""
Save file, and execute importation
"""
if context is None:
context = {}
cur = self.browse(cr, uid, ids[0], context=context)
ctx = context.copy()
if cur.lang_id:
ctx.update({'lang': cur.lang_id.code})
self.pool.get('ir.attachment').import_csv(cr, uid, int(cur.import_list), cur.import_file, cur.email_result, context=ctx)
return {'type': 'ir.actions.act_window_close'}
LaunchImport()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
syleam/document_csv
|
wizard/launch.py
|
Python
|
gpl-3.0
| 3,320
| 0.002711
|
from sklearn.cluster import MiniBatchKMeans
import numpy as np
import json
import os
from texta.settings import MODELS_DIR
class WordCluster(object):
"""
WordCluster object to cluster Word2Vec vectors using MiniBatchKMeans.
: param embedding : Word2Vec object
: param n_clusters, int, number of clusters in output
"""
def __init__(self):
self.word_to_cluster_dict = {}
self.cluster_dict = {}
def cluster(self, embedding, n_clusters=None):
vocab = list(embedding.wv.vocab.keys())
vocab_vectors = np.array([embedding[word] for word in vocab])
if not n_clusters:
# number of clusters = 10% of embedding vocabulary
# if larger than 1000, limit to 1000
n_clusters = int(len(vocab) * 0.1)
if n_clusters > 1000:
n_clusters = 1000
clustering = MiniBatchKMeans(n_clusters=n_clusters).fit(vocab_vectors)
cluster_labels = clustering.labels_
for i,cluster_label in enumerate(cluster_labels):
word = vocab[i]
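            # Label the cluster with the vocabulary word closest to its centre
            # (the "etalon") and index every member word under that label.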
etalon = embedding.wv.most_similar(positive=[clustering.cluster_centers_[cluster_label]])[0][0]
if etalon not in self.cluster_dict:
self.cluster_dict[etalon] = []
self.cluster_dict[etalon].append(word)
self.word_to_cluster_dict[word] = etalon
return True
def query(self, word):
try:
return self.cluster_dict[self.word_to_cluster_dict[word]]
except:
return []
def text_to_clusters(self, text):
text = [str(self.word_to_cluster_dict[word]) for word in text if word in self.word_to_cluster_dict]
return ' '.join(text)
def save(self, file_path):
try:
data = {"word_to_cluster_dict": self.word_to_cluster_dict, "cluster_dict": self.cluster_dict}
with open(file_path, 'w') as fh:
fh.write(json.dumps(data))
return True
except:
return False
def load(self, unique_id, task_type='train_tagger'):
file_path = os.path.join(MODELS_DIR, task_type, 'cluster_{}'.format(unique_id))
try:
with open(file_path) as fh:
data = json.loads(fh.read())
self.cluster_dict = data["cluster_dict"]
self.word_to_cluster_dict = data["word_to_cluster_dict"]
except:
return False
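# Illustrative usage sketch (not part of the original module): the embedding is
# assumed to be a trained gensim Word2Vec model, matching the .wv.vocab and
# .wv.most_similar accessors used above, and the file paths are hypothetical.
if __name__ == '__main__':
    from gensim.models import Word2Vec
    embedding = Word2Vec.load('/path/to/embedding.model')
    wc = WordCluster()
    wc.cluster(embedding, n_clusters=100)
    print(wc.query('house'))                    # words sharing a cluster with 'house'
    print(wc.text_to_clusters(['big', 'house']))
    wc.save('/path/to/clusters.json')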
|
texta-tk/texta
|
utils/word_cluster.py
|
Python
|
gpl-3.0
| 2,505
| 0.007186
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
import sys
import zipfile
from pyload.plugin.Extractor import Extractor, ArchiveError, CRCError, PasswordError
from pyload.utils import fs_encode
class UnZip(Extractor):
__name = "UnZip"
__type = "extractor"
__version = "1.12"
__description = """Zip extractor plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "vuolter@gmail.com")]
EXTENSIONS = [".zip", ".zip64"]
NAME = __name__.rsplit('.', 1)[1]
VERSION = "(python %s.%s.%s)" % (sys.version_info[0], sys.version_info[1], sys.version_info[2])
@classmethod
def isUsable(cls):
return sys.version_info[:2] >= (2, 6)
def list(self, password=None):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
return z.namelist()
def check(self, password):
pass
def verify(self):
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
raise PasswordError
def extract(self, password=None):
try:
with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z:
z.setpassword(password)
badfile = z.testzip()
if badfile:
raise CRCError(badfile)
else:
z.extractall(self.out)
except (zipfile.BadZipfile, zipfile.LargeZipFile), e:
raise ArchiveError(e)
except RuntimeError, e:
if "encrypted" in e:
raise PasswordError
else:
raise ArchiveError(e)
else:
self.files = z.namelist()
|
ardi69/pyload-0.4.10
|
pyload/plugin/extractor/UnZip.py
|
Python
|
gpl-3.0
| 1,891
| 0.00899
|
# https://leetcode.com/problems/linked-list-cycle-ii/
from ListNode import ListNode
class Solution(object):
def detectCycle(self, head):
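        # Floyd's cycle detection: advance a slow and a fast pointer until they
        # meet inside the cycle (or fast falls off the end), then restart one
        # pointer from the head; the node where the two meet again is the start
        # of the cycle.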
slow,fast = head,head
while True:
if fast == None or fast.next == None : return None
slow = slow.next
fast = fast.next.next
if slow == fast :
break
while head != fast:
head = head.next
fast = fast.next
return head
|
menghanY/LeetCode-Python
|
LinkedList/LinkedListCycleII.py
|
Python
|
mit
| 474
| 0.018987
|
from pycse.lisp import *
def test_symbol():
assert Symbol('setf').lisp == 'setf'
def test_quote():
assert Quote('setf').lisp == "'setf"
def test_sharpquote():
assert SharpQuote('setf').lisp == "#'setf"
def test_cons():
assert Cons('a', 3).lisp == '("a" . 3)'
def test_Alist():
assert Alist(["a", 1, "b", 2]).lisp == '(("a" . 1) ("b" . 2))'
def test_vector():
assert Vector(["a", 1, 3]).lisp == '["a" 1 3]'
def test_Comma():
assert Comma(Symbol("setf")).lisp == ',setf'
def test_splice():
assert Splice([1, 3]).lisp == ',@(1 3)'
def test_backquote():
assert Backquote([Symbol("a"), 1]).lisp == '`(a 1)'
def test_comment():
assert Comment(Symbol("test")).lisp == '; test'
|
jkitchin/pycse
|
pycse/tests/test_lisp.py
|
Python
|
gpl-2.0
| 720
| 0.013889
|
from django.conf.urls.defaults import *
from tastypie.api import Api
#from tastytools.api import Api
from base.api import BaseResource
from bcmon.api import PlayoutResource as BcmonPlayoutResource
from bcmon.api import ChannelResource as BcmonChannelResource
from alibrary.api import MediaResource, ReleaseResource, ArtistResource, LabelResource, SimplePlaylistResource, PlaylistResource, PlaylistItemPlaylistResource
from importer.api import ImportResource, ImportFileResource
from exporter.api import ExportResource, ExportItemResource
from abcast.api import StationResource, ChannelResource, JingleResource, JingleSetResource, EmissionResource
from abcast.api import BaseResource as AbcastBaseResource
from istats.api import StatsResource
from fluent_comments.api import CommentResource
api = Api()
# base
api.register(BaseResource())
# bcmon
api.register(BcmonPlayoutResource())
api.register(BcmonChannelResource())
# library
api.register(MediaResource())
api.register(ReleaseResource())
api.register(ArtistResource())
api.register(LabelResource())
api.register(SimplePlaylistResource())
api.register(PlaylistResource())
api.register(PlaylistItemPlaylistResource())
# importer
api.register(ImportResource())
api.register(ImportFileResource())
# exporter
api.register(ExportResource())
api.register(ExportItemResource())
# abcast
api.register(AbcastBaseResource())
api.register(StationResource())
api.register(ChannelResource())
api.register(JingleResource())
api.register(JingleSetResource())
### scheduler
api.register(EmissionResource())
# comment
api.register(CommentResource())
# server stats
api.register(StatsResource())
"""
urlpatterns = patterns('',
(r'^', include(api.urls)),
)
"""
|
hzlf/openbroadcast
|
website/urls_api.py
|
Python
|
gpl-3.0
| 1,715
| 0.002915
|
import socket
import sys
import threading
try:
from Queue import Queue, Empty
except:
from queue import Queue, Empty
from collections import OrderedDict
from . import parseintset
DEFAULT_THREAD_LIMIT = 200
CLOSED_STATUS = 'closed'
OPEN_STATUS = 'open'
if sys.version_info.major >= 3:
unicode = str
class Scanner(threading.Thread):
def __init__(self, input_queue, output_queue, timeout=5):
threading.Thread.__init__(self)
# These are the scan queues
self.input_queue = input_queue
self.output_queue = output_queue
self.keep_running = True
self.timeout = timeout
def run(self):
# This loop will exit when the input_queue generates an exception because all of the threads
# are complete
while self.keep_running:
try:
host, port = self.input_queue.get(timeout=5)
except Empty:
continue
# Make the socket for performing the scan
sock_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_instance.settimeout(self.timeout)
try:
# Connect to the host via TCP
sock_instance.connect((host, port))
except socket.error:
# Note that it is in the closed state
self.output_queue.put((host, port, CLOSED_STATUS))
else:
# Note that it is in the open state
self.output_queue.put((host, port, OPEN_STATUS))
sock_instance.close()
self.input_queue.task_done()
self.output_queue.task_done()
def stop_running(self):
self.keep_running = False
def port_scan(host, ports, thread_count=DEFAULT_THREAD_LIMIT, callback=None, timeout=5):
# Parse the ports if necessary
if isinstance(ports, (str, unicode)):
parsed_ports = parseintset.parseIntSet(ports)
else:
parsed_ports = ports
# Setup the queues
to_scan = Queue()
scanned = Queue()
# Prepare the scanners
# These scanners will monitor the input queue for new things to scan, scan them, and them put
# them in the output queue
    scanners = [Scanner(to_scan, scanned, timeout) for i in range(min(thread_count, len(parsed_ports)))]
for scanner in scanners:
scanner.start()
# Create the list of host ports to scan
host_ports = [(host, port) for port in parsed_ports]
for host_port in host_ports:
to_scan.put(host_port)
    # This will store the list of successfully executed host/port combinations
results = {}
# This will contain the resulting data
data = []
for host, port in host_ports:
while (host, port) not in results:
# Get the queued thread: this will block if necessary
scanned_host, scanned_port, scan_status = scanned.get()
# Log that that we performed the scan
results[(scanned_host, scanned_port)] = scan_status
# Append the data
data.append(OrderedDict({
'dest' : scanned_host,
'port' : 'TCP\\' + str(scanned_port),
'status': scan_status
}))
# Run the callback if one is present
if callback is not None:
callback(scanned_host, scanned_port, scan_status)
# Stop the threads
for scanner in scanners:
scanner.stop_running()
return data
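A brief usage sketch of the module's entry point: scan a couple of ports on the local machine and print each result as it arrives via the callback. The host, port list, thread count and timeout are arbitrary example values:

def report(host, port, status):
    # Called once per scanned port as results come off the output queue.
    print("%s:%d is %s" % (host, port, status))

# Scan two common ports on localhost with a short timeout.
results = port_scan("127.0.0.1", [22, 80], thread_count=2,
                    callback=report, timeout=2)
for row in results:
    print(row["dest"], row["port"], row["status"])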
|
LukeMurphey/splunk-network-tools
|
src/bin/network_tools_app/portscan.py
|
Python
|
apache-2.0
| 3,454
| 0.003185
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import StringIO
import urlparse
from rdflib import Graph, RDF
from rdflib.namespace import OWL
from rdflib.plugins.parsers.notation3 import BadSyntax
import agora.fountain.vocab.schema as sch
__author__ = 'Fernando Serena'
class VocabularyException(Exception):
pass
class DuplicateVocabulary(VocabularyException):
pass
class VocabularyNotFound(VocabularyException):
pass
class UnknownVocabulary(VocabularyException):
pass
def __load_owl(owl):
"""
:param owl:
:return:
"""
owl_g = Graph()
for f in ['turtle', 'xml']:
try:
owl_g.parse(source=StringIO.StringIO(owl), format=f)
break
except SyntaxError:
pass
if not len(owl_g):
raise VocabularyException()
try:
uri = list(owl_g.subjects(RDF.type, OWL.Ontology)).pop()
vid = [p for (p, u) in owl_g.namespaces() if uri in u and p != '']
imports = owl_g.objects(uri, OWL.imports)
if not len(vid):
vid = urlparse.urlparse(uri).path.split('/')[-1]
else:
vid = vid.pop()
return vid, uri, owl_g, imports
except IndexError:
raise VocabularyNotFound()
def add_vocabulary(owl):
"""
:param owl:
:return:
"""
vid, uri, owl_g, imports = __load_owl(owl)
if vid in sch.contexts():
raise DuplicateVocabulary('Vocabulary already contained')
sch.add_context(vid, owl_g)
vids = [vid]
# TODO: Import referenced ontologies
for im_uri in imports:
print im_uri
im_g = Graph()
try:
im_g.load(im_uri, format='turtle')
except BadSyntax:
try:
im_g.load(im_uri)
except BadSyntax:
print 'bad syntax in {}'.format(im_uri)
try:
child_vids = add_vocabulary(im_g.serialize(format='turtle'))
vids.extend(child_vids)
except DuplicateVocabulary, e:
print 'already added'
except VocabularyNotFound, e:
print 'uri not found for {}'.format(im_uri)
except Exception, e:
print e.message
return vids
def update_vocabulary(vid, owl):
"""
:param vid:
:param owl:
:return:
"""
owl_vid, uri, owl_g, imports = __load_owl(owl)
if vid != owl_vid:
raise Exception("Identifiers don't match")
if vid not in sch.contexts():
raise UnknownVocabulary('Vocabulary id is not known')
sch.update_context(vid, owl_g)
def delete_vocabulary(vid):
"""
:param vid:
:return:
"""
if vid not in sch.contexts():
raise UnknownVocabulary('Vocabulary id is not known')
sch.remove_context(vid)
def get_vocabularies():
"""
:return:
"""
return sch.contexts()
def get_vocabulary(vid):
"""
:param vid:
:return:
"""
return sch.get_context(vid).serialize(format='turtle')
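For reference, the core of `__load_owl` boils down to parsing the serialized ontology with rdflib and pulling out the subject typed as `owl:Ontology`. A compact standalone sketch of just that step, using inline Turtle as example input:

from rdflib import Graph, RDF
from rdflib.namespace import OWL

ttl = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
<http://example.org/vocab> a owl:Ontology .
"""

g = Graph()
g.parse(data=ttl, format="turtle")

# The ontology URI is the subject declared as owl:Ontology.
uri = list(g.subjects(RDF.type, OWL.Ontology)).pop()
print(uri)  # http://example.org/vocab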
|
SmartDeveloperHub/agora-fountain
|
agora/fountain/vocab/onto.py
|
Python
|
apache-2.0
| 4,018
| 0.000249
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
# Copyright 2013 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime, timedelta
from openerp.osv import fields, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.addons.connector.session import ConnectorSession
from openerp.addons.connector.connector import ConnectorUnit
from openerp.addons.connector.unit.mapper import (mapping,
only_create,
ImportMapper
)
from .unit.backend_adapter import GenericAdapter
from .unit.import_synchronizer import (import_batch,
DirectBatchImport,
MagentoImportSynchronizer,
AddCheckpoint,
)
from .partner import partner_import_batch
from .sale import sale_order_import_batch
from .backend import magento
from .connector import add_checkpoint
_logger = logging.getLogger(__name__)
IMPORT_DELTA_BUFFER = 30 # seconds
class magento_backend(orm.Model):
_name = 'magento.backend'
_description = 'Magento Backend'
_inherit = 'connector.backend'
_backend_type = 'magento'
def select_versions(self, cr, uid, context=None):
""" Available versions in the backend.
        Can be inherited to add custom versions. Adding a version from
        an ``_inherit`` through this method avoids having to redefine
        the ``version`` field in the ``_inherit`` model.
"""
return [('1.7', '1.7')]
def _select_versions(self, cr, uid, context=None):
""" Available versions in the backend.
If you want to add a version, do not override this
method, but ``select_version``.
"""
return self.select_versions(cr, uid, context=context)
def _get_stock_field_id(self, cr, uid, context=None):
field_ids = self.pool.get('ir.model.fields').search(
cr, uid,
[('model', '=', 'product.product'),
('name', '=', 'virtual_available')],
context=context)
return field_ids[0]
_columns = {
'version': fields.selection(
_select_versions,
string='Version',
required=True),
'location': fields.char(
'Location',
required=True,
help="Url to magento application"),
'admin_location': fields.char('Admin Location'),
'use_custom_api_path': fields.boolean(
'Custom Api Path',
help="The default API path is '/index.php/api/xmlrpc'. "
"Check this box if you use a custom API path, in that case, "
"the location has to be completed with the custom API path "),
'username': fields.char(
'Username',
help="Webservice user"),
'password': fields.char(
'Password',
help="Webservice password"),
'use_auth_basic': fields.boolean(
'Use HTTP Auth Basic',
help="Use a Basic Access Authentication for the API. "
"The Magento server could be configured to restrict access "
"using a HTTP authentication based on a username and "
"a password."),
'auth_basic_username': fields.char(
'Basic Auth. Username',
help="Basic access authentication web server side username"),
'auth_basic_password': fields.char(
'Basic Auth. Password',
help="Basic access authentication web server side password"),
'sale_prefix': fields.char(
'Sale Prefix',
help="A prefix put before the name of imported sales orders.\n"
"For instance, if the prefix is 'mag-', the sales "
"order 100000692 in Magento, will be named 'mag-100000692' "
"in OpenERP."),
'warehouse_id': fields.many2one('stock.warehouse',
'Warehouse',
required=True,
help='Warehouse used to compute the '
'stock quantities.'),
'website_ids': fields.one2many(
'magento.website', 'backend_id',
string='Website', readonly=True),
'default_lang_id': fields.many2one(
'res.lang',
'Default Language',
help="If a default language is selected, the records "
"will be imported in the translation of this language.\n"
"Note that a similar configuration exists "
"for each storeview."),
'default_category_id': fields.many2one(
'product.category',
string='Default Product Category',
help='If a default category is selected, products imported '
'without a category will be linked to it.'),
# add a field `auto_activate` -> activate a cron
'import_products_from_date': fields.datetime(
'Import products from date'),
'import_categories_from_date': fields.datetime(
'Import categories from date'),
'catalog_price_tax_included': fields.boolean('Prices include tax'),
'product_stock_field_id': fields.many2one(
'ir.model.fields',
string='Stock Field',
domain="[('model', 'in', ['product.product', 'product.template']),"
" ('ttype', '=', 'float')]",
help="Choose the field of the product which will be used for "
"stock inventory updates.\nIf empty, Quantity Available "
"is used."),
'product_binding_ids': fields.one2many('magento.product.product',
'backend_id',
string='Magento Products',
readonly=True),
}
_defaults = {
'product_stock_field_id': _get_stock_field_id,
'use_custom_api_path': False,
'use_auth_basic': False,
}
_sql_constraints = [
('sale_prefix_uniq', 'unique(sale_prefix)',
"A backend with the same sale prefix already exists")
]
def check_magento_structure(self, cr, uid, ids, context=None):
""" Used in each data import.
Verify if a website exists for each backend before starting the import.
"""
for backend_id in ids:
website_ids = self.pool['magento.website'].search(
cr, uid, [('backend_id', '=', backend_id)], context=context)
if not website_ids:
self.synchronize_metadata(cr, uid, backend_id, context=context)
return True
def synchronize_metadata(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
session = ConnectorSession(cr, uid, context=context)
for backend_id in ids:
for model in ('magento.website',
'magento.store',
'magento.storeview'):
# import directly, do not delay because this
# is a fast operation, a direct return is fine
# and it is simpler to import them sequentially
import_batch(session, model, backend_id)
return True
def import_partners(self, cr, uid, ids, context=None):
""" Import partners from all websites """
if not hasattr(ids, '__iter__'):
ids = [ids]
self.check_magento_structure(cr, uid, ids, context=context)
for backend in self.browse(cr, uid, ids, context=context):
for website in backend.website_ids:
website.import_partners()
return True
def import_sale_orders(self, cr, uid, ids, context=None):
""" Import sale orders from all store views """
if not hasattr(ids, '__iter__'):
ids = [ids]
storeview_obj = self.pool.get('magento.storeview')
storeview_ids = storeview_obj.search(cr, uid,
[('backend_id', 'in', ids)],
context=context)
storeviews = storeview_obj.browse(cr, uid, storeview_ids,
context=context)
for storeview in storeviews:
storeview.import_sale_orders()
return True
def import_customer_groups(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
self.check_magento_structure(cr, uid, ids, context=context)
session = ConnectorSession(cr, uid, context=context)
for backend_id in ids:
import_batch.delay(session, 'magento.res.partner.category',
backend_id)
return True
def _import_from_date(self, cr, uid, ids, model, from_date_field,
context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
self.check_magento_structure(cr, uid, ids, context=context)
session = ConnectorSession(cr, uid, context=context)
import_start_time = datetime.now()
for backend in self.browse(cr, uid, ids, context=context):
from_date = getattr(backend, from_date_field)
if from_date:
from_date = datetime.strptime(from_date,
DEFAULT_SERVER_DATETIME_FORMAT)
else:
from_date = None
import_batch.delay(session, model,
backend.id, filters={'from_date': from_date})
# Records from Magento are imported based on their `created_at`
# date. This date is set on Magento at the beginning of a
# transaction, so if the import is run between the beginning and
# the end of a transaction, the import of a record may be
# missed. That's why we add a small buffer back in time where
        # any records missed this way will be retrieved. This also
# means that we'll have jobs that import twice the same records,
# but this is not a big deal because they will be skipped when
# the last `sync_date` is the same.
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
next_time = next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.write(cr, uid, ids, {from_date_field: next_time}, context=context)
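The comment above explains the delta-buffer trick: the next "from date" is set slightly before the moment this import run started, so records created inside an in-flight Magento transaction are caught by the following run, at the cost of occasionally re-importing a record that is then skipped. A tiny standalone sketch of that computation, using the same 30-second value as IMPORT_DELTA_BUFFER:

from datetime import datetime, timedelta

IMPORT_DELTA_BUFFER = 30  # seconds, same value as the module constant

import_start_time = datetime.now()
# ... the import jobs are scheduled here ...
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
# Stored as the new "import from" date, e.g. '2013-01-01 12:34:26'
print(next_time.strftime('%Y-%m-%d %H:%M:%S'))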
def import_product_categories(self, cr, uid, ids, context=None):
self._import_from_date(cr, uid, ids, 'magento.product.category',
'import_categories_from_date', context=context)
return True
def import_product_product(self, cr, uid, ids, context=None):
self._import_from_date(cr, uid, ids, 'magento.product.product',
'import_products_from_date', context=context)
return True
def update_product_stock_qty(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
mag_product_obj = self.pool.get('magento.product.product')
product_ids = mag_product_obj.search(cr, uid,
[('backend_id', 'in', ids),
('no_stock_sync', '=', False)],
context=context)
mag_product_obj.recompute_magento_qty(cr, uid, product_ids,
context=context)
return True
def _magento_backend(self, cr, uid, callback, domain=None, context=None):
if domain is None:
domain = []
ids = self.search(cr, uid, domain, context=context)
if ids:
callback(cr, uid, ids, context=context)
def _scheduler_import_sale_orders(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_sale_orders,
domain=domain, context=context)
def _scheduler_import_customer_groups(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_customer_groups,
domain=domain, context=context)
def _scheduler_import_partners(self, cr, uid, domain=None, context=None):
self._magento_backend(cr, uid, self.import_partners,
domain=domain, context=context)
def _scheduler_import_product_categories(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_product_categories,
domain=domain, context=context)
def _scheduler_import_product_product(self, cr, uid, domain=None,
context=None):
self._magento_backend(cr, uid, self.import_product_product,
domain=domain, context=context)
def _scheduler_update_product_stock_qty(self, cr, uid,
domain=None, context=None):
self._magento_backend(cr, uid, self.update_product_stock_qty,
domain=domain, context=context)
def output_recorder(self, cr, uid, ids, context=None):
""" Utility method to output a file containing all the recorded
requests / responses with Magento. Used to generate test data.
Should be called with ``erppeek`` for instance.
"""
from .unit.backend_adapter import output_recorder
import os
import tempfile
fmt = '%Y-%m-%d-%H-%M-%S'
timestamp = datetime.now().strftime(fmt)
filename = 'output_%s_%s' % (cr.dbname, timestamp)
path = os.path.join(tempfile.gettempdir(), filename)
output_recorder(path)
return path
# TODO migrate from external.shop.group
class magento_website(orm.Model):
_name = 'magento.website'
_inherit = 'magento.binding'
_description = 'Magento Website'
_order = 'sort_order ASC, id ASC'
_columns = {
'name': fields.char('Name', required=True, readonly=True),
'code': fields.char('Code', readonly=True),
'sort_order': fields.integer('Sort Order', readonly=True),
'store_ids': fields.one2many(
'magento.store',
'website_id',
string="Stores",
readonly=True),
'import_partners_from_date': fields.datetime(
'Import partners from date'),
'product_binding_ids': fields.many2many('magento.product.product',
string='Magento Products',
readonly=True),
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'A website with the same ID on Magento already exists.'),
]
def import_partners(self, cr, uid, ids, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
session = ConnectorSession(cr, uid, context=context)
import_start_time = datetime.now()
for website in self.browse(cr, uid, ids, context=context):
backend_id = website.backend_id.id
if website.import_partners_from_date:
from_date = datetime.strptime(
website.import_partners_from_date,
DEFAULT_SERVER_DATETIME_FORMAT)
else:
from_date = None
partner_import_batch.delay(
session, 'magento.res.partner', backend_id,
{'magento_website_id': website.magento_id,
'from_date': from_date})
# Records from Magento are imported based on their `created_at`
# date. This date is set on Magento at the beginning of a
# transaction, so if the import is run between the beginning and
# the end of a transaction, the import of a record may be
# missed. That's why we add a small buffer back in time where
        # any records missed this way will be retrieved. This also
# means that we'll have jobs that import twice the same records,
# but this is not a big deal because they will be skipped when
# the last `sync_date` is the same.
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
next_time = next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.write(cr, uid, ids, {'import_partners_from_date': next_time},
context=context)
return True
# TODO migrate from sale.shop (create a magento.store + associated
# sale.shop)
class magento_store(orm.Model):
_name = 'magento.store'
_inherit = 'magento.binding'
_description = 'Magento Store'
_inherits = {'sale.shop': 'openerp_id'}
def _get_store_from_website(self, cr, uid, ids, context=None):
store_obj = self.pool.get('magento.store')
return store_obj.search(cr, uid,
[('website_id', 'in', ids)],
context=context)
_columns = {
'website_id': fields.many2one(
'magento.website',
'Magento Website',
required=True,
readonly=True,
ondelete='cascade'),
'openerp_id': fields.many2one(
'sale.shop',
string='Sale Shop',
required=True,
readonly=True,
ondelete='cascade'),
'backend_id': fields.related(
'website_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store={
'magento.store': (lambda self, cr, uid, ids, c=None: ids,
['website_id'], 10),
'magento.website': (_get_store_from_website,
['backend_id'], 20),
},
readonly=True),
'storeview_ids': fields.one2many(
'magento.storeview',
'store_id',
string="Storeviews",
readonly=True),
'send_picking_done_mail': fields.boolean(
'Send email notification on picking done',
help="Does the picking export/creation should send "
"an email notification on Magento side?"),
'send_invoice_paid_mail': fields.boolean(
'Send email notification on invoice validated/paid',
help="Does the invoice export/creation should send "
"an email notification on Magento side?"),
'create_invoice_on': fields.selection(
[('open', 'Validate'),
('paid', 'Paid')],
'Create invoice on action',
required=True,
help="Should the invoice be created in Magento "
"when it is validated or when it is paid in OpenERP?\n"
"This only takes effect if the sales order's related "
"payment method is not giving an option for this by "
"itself. (See Payment Methods)"),
}
_defaults = {
'create_invoice_on': 'paid',
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
'A store with the same ID on Magento already exists.'),
]
class sale_shop(orm.Model):
_inherit = 'sale.shop'
_columns = {
'magento_bind_ids': fields.one2many(
'magento.store', 'openerp_id',
string='Magento Bindings',
readonly=True),
}
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default['magento_bind_ids'] = False
return super(sale_shop, self).copy_data(cr, uid, id,
default=default,
context=context)
# TODO: migrate from magerp.storeviews
class magento_storeview(orm.Model):
_name = 'magento.storeview'
_inherit = 'magento.binding'
_description = "Magento Storeview"
_order = 'sort_order ASC, id ASC'
_columns = {
'name': fields.char('Name', required=True, readonly=True),
'code': fields.char('Code', readonly=True),
'enabled': fields.boolean('Enabled', readonly=True),
'sort_order': fields.integer('Sort Order', readonly=True),
'store_id': fields.many2one('magento.store', 'Store',
ondelete='cascade', readonly=True),
'lang_id': fields.many2one('res.lang', 'Language'),
'backend_id': fields.related(
'store_id', 'website_id', 'backend_id',
type='many2one',
relation='magento.backend',
string='Magento Backend',
store=True,
readonly=True),
'import_orders_from_date': fields.datetime(
'Import sale orders from date',
            help='Do not consider non-imported sale orders before this date. '
'Leave empty to import all sale orders'),
'no_sales_order_sync': fields.boolean(
'No Sales Order Synchronization',
help='Check if the storeview is active in Magento '
'but its sales orders should not be imported.'),
}
_defaults = {
'no_sales_order_sync': False,
}
_sql_constraints = [
('magento_uniq', 'unique(backend_id, magento_id)',
         'A storeview with the same ID on Magento already exists.'),
]
def import_sale_orders(self, cr, uid, ids, context=None):
session = ConnectorSession(cr, uid, context=context)
import_start_time = datetime.now()
for storeview in self.browse(cr, uid, ids, context=context):
if storeview.no_sales_order_sync:
_logger.debug("The storeview '%s' is active in Magento "
"but its sales orders should not be imported." %
storeview.name)
continue
backend_id = storeview.backend_id.id
if storeview.import_orders_from_date:
from_date = datetime.strptime(
storeview.import_orders_from_date,
DEFAULT_SERVER_DATETIME_FORMAT)
else:
from_date = None
sale_order_import_batch.delay(
session,
'magento.sale.order',
backend_id,
{'magento_storeview_id': storeview.magento_id,
'from_date': from_date},
priority=1) # executed as soon as possible
# Records from Magento are imported based on their `created_at`
# date. This date is set on Magento at the beginning of a
# transaction, so if the import is run between the beginning and
# the end of a transaction, the import of a record may be
# missed. That's why we add a small buffer back in time where
        # any records missed this way will be retrieved. This also
# means that we'll have jobs that import twice the same records,
# but this is not a big deal because the sales orders will be
# imported the first time and the jobs will be skipped on the
# subsequent imports
next_time = import_start_time - timedelta(seconds=IMPORT_DELTA_BUFFER)
next_time = next_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.write(cr, uid, ids, {'import_orders_from_date': next_time},
context=context)
return True
@magento
class WebsiteAdapter(GenericAdapter):
_model_name = 'magento.website'
_magento_model = 'ol_websites'
_admin_path = 'system_store/editWebsite/website_id/{id}'
@magento
class StoreAdapter(GenericAdapter):
_model_name = 'magento.store'
_magento_model = 'ol_groups'
_admin_path = 'system_store/editGroup/group_id/{id}'
@magento
class StoreviewAdapter(GenericAdapter):
_model_name = 'magento.storeview'
_magento_model = 'ol_storeviews'
_admin_path = 'system_store/editStore/store_id/{id}'
@magento
class MetadataBatchImport(DirectBatchImport):
""" Import the records directly, without delaying the jobs.
Import the Magento Websites, Stores, Storeviews
They are imported directly because this is a rare and fast operation,
and we don't really bother if it blocks the UI during this time.
    (that's also a means to rapidly check the connectivity with Magento).
"""
_model_name = [
'magento.website',
'magento.store',
'magento.storeview',
]
@magento
class WebsiteImportMapper(ImportMapper):
_model_name = 'magento.website'
direct = [('code', 'code'),
('sort_order', 'sort_order')]
@mapping
def name(self, record):
name = record['name']
if name is None:
name = _('Undefined')
return {'name': name}
@mapping
def backend_id(self, record):
return {'backend_id': self.backend_record.id}
@magento
class StoreImportMapper(ImportMapper):
_model_name = 'magento.store'
direct = [('name', 'name')]
@mapping
def website_id(self, record):
binder = self.get_binder_for_model('magento.website')
binding_id = binder.to_openerp(record['website_id'])
return {'website_id': binding_id}
@mapping
@only_create
def warehouse_id(self, record):
return {'warehouse_id': self.backend_record.warehouse_id.id}
@magento
class StoreviewImportMapper(ImportMapper):
_model_name = 'magento.storeview'
direct = [
('name', 'name'),
('code', 'code'),
('is_active', 'enabled'),
('sort_order', 'sort_order'),
]
@mapping
def store_id(self, record):
binder = self.get_binder_for_model('magento.store')
binding_id = binder.to_openerp(record['group_id'])
return {'store_id': binding_id}
@magento
class StoreImport(MagentoImportSynchronizer):
""" Import one Magento Store (create a sale.shop via _inherits) """
_model_name = ['magento.store',
]
def _create(self, data):
openerp_binding_id = super(StoreImport, self)._create(data)
checkpoint = self.get_connector_unit_for_model(AddCheckpoint)
checkpoint.run(openerp_binding_id)
return openerp_binding_id
@magento
class StoreviewImport(MagentoImportSynchronizer):
""" Import one Magento Storeview """
_model_name = ['magento.storeview',
]
def _create(self, data):
openerp_binding_id = super(StoreviewImport, self)._create(data)
checkpoint = self.get_connector_unit_for_model(StoreViewAddCheckpoint)
checkpoint.run(openerp_binding_id)
return openerp_binding_id
@magento
class StoreViewAddCheckpoint(ConnectorUnit):
""" Add a connector.checkpoint on the magento.storeview
record """
_model_name = ['magento.storeview',
]
def run(self, openerp_binding_id):
add_checkpoint(self.session,
self.model._name,
openerp_binding_id,
self.backend_record.id)
|
credativUK/connector-magento
|
__unported__/magentoerpconnect/magento_model.py
|
Python
|
agpl-3.0
| 28,454
| 0
|
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014 Micah Lee <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from onionshare import web
from nose import with_setup
def test_generate_slug_length():
"""generates a 26-character slug"""
assert len(web.slug) == 26
def test_generate_slug_characters():
"""generates a base32-encoded slug"""
def is_b32(string):
b32_alphabet = "01234556789abcdefghijklmnopqrstuvwxyz"
return all(char in b32_alphabet for char in string)
assert is_b32(web.slug)
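These tests pin down the slug's shape: 26 characters drawn from the lowercase base32 alphabet. A hedged sketch of how such a slug can be produced; it mirrors the usual recipe of base32-encoding 16 random bytes and dropping the padding, and is not necessarily OnionShare's exact code:

import os
import base64

def generate_slug():
    # 16 random bytes -> 32 base32 chars including padding; stripping the
    # six '=' padding chars leaves a 26-character lowercase slug.
    return base64.b32encode(os.urandom(16)).decode().lower().rstrip('=')

slug = generate_slug()
assert len(slug) == 26
assert all(c in "abcdefghijklmnopqrstuvwxyz234567" for c in slug)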
|
kyonetca/onionshare
|
test/onionshare_web_test.py
|
Python
|
gpl-3.0
| 1,129
| 0
|
import sys
from os.path import join, abspath, dirname
# PATH vars
here = lambda *x: join(abspath(dirname(__file__)), *x)
PROJECT_ROOT = here("..")
root = lambda *x: join(abspath(PROJECT_ROOT), *x)
sys.path.insert(0, root('apps'))
ADMINS = (
('Maxime Lapointe', 'maxx@themaxx.ca'),
)
MANAGERS = ADMINS
SHELL_PLUS = 'ipython'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'CHANGE THIS!!!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'www',
)
PROJECT_APPS = ()
INSTALLED_APPS += PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'lapare.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'lapare.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '../www_lapare_ca.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'fr-CA'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = root('assets', 'uploads')
MEDIA_URL = '/media/'
# Additional locations of static files
STATICFILES_DIRS = (
root('assets'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = ('django.contrib.staticfiles.storage.'
'ManifestStaticFilesStorage')
TEMPLATE_DIRS = (
root('templates'),
)
# .local.py overrides all the common settings.
try:
from .local import *
except ImportError:
from .production import *
# importing test settings file if necessary
if len(sys.argv) > 1 and 'test' in sys.argv[1]:
from .testing import *
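The try/except at the bottom implements the usual split-settings pattern: a developer's `.local.py` wins when present, otherwise `.production.py` is used, and a test run layers `.testing.py` on top. As a hedged illustration, a hypothetical `local.py` override might look like this; the file name is implied by the import above, but the values are examples and not part of the repository:

# lapare/settings/local.py -- hypothetical developer override; base.py pulls
# these names in via "from .local import *", so they shadow the defaults above.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
SECRET_KEY = 'dev-only-not-for-production'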
|
themaxx75/lapare-bijoux
|
lapare.ca/lapare/settings/base.py
|
Python
|
bsd-3-clause
| 2,795
| 0.000716
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import os
import re
import logging
import tempfile
import argparse
from twisted.internet.defer import inlineCallbacks
import subprocess
from email.Utils import getaddresses, parseaddr
from email.message import Message
import mailcap
from cStringIO import StringIO
from alot.commands import Command, registerCommand
from alot.commands.globals import ExternalCommand
from alot.commands.globals import FlushCommand
from alot.commands.globals import ComposeCommand
from alot.commands.globals import MoveCommand
from alot.commands.globals import CommandCanceled
from alot.commands.envelope import SendCommand
from alot import completion
from alot.db.utils import decode_header
from alot.db.utils import encode_header
from alot.db.utils import extract_headers
from alot.db.utils import extract_body
from alot.db.envelope import Envelope
from alot.db.attachment import Attachment
from alot.db.errors import DatabaseROError
from alot.settings import settings
from alot.helper import parse_mailcap_nametemplate
from alot.helper import split_commandstring
from alot.helper import email_as_string
from alot.utils.booleanaction import BooleanAction
from alot.completion import ContactsCompleter
from alot.widgets.globals import AttachmentWidget
MODE = 'thread'
def determine_sender(mail, action='reply'):
"""
Inspect a given mail to reply/forward/bounce and find the most appropriate
account to act from and construct a suitable From-Header to use.
:param mail: the email to inspect
:type mail: `email.message.Message`
:param action: intended use case: one of "reply", "forward" or "bounce"
:type action: str
"""
assert action in ['reply', 'forward', 'bounce']
realname = None
address = None
# get accounts
my_accounts = settings.get_accounts()
assert my_accounts, 'no accounts set!'
# extract list of addresses to check for my address
# X-Envelope-To and Envelope-To are used to store the recipient address
# if not included in other fields
candidate_addresses = getaddresses(mail.get_all('To', []) +
mail.get_all('Cc', []) +
mail.get_all('Delivered-To', []) +
mail.get_all('X-Envelope-To', []) +
mail.get_all('Envelope-To', []) +
mail.get_all('From', []))
logging.debug('candidate addresses: %s' % candidate_addresses)
# pick the most important account that has an address in candidates
# and use that accounts realname and the address found here
for account in my_accounts:
acc_addresses = account.get_addresses()
for alias in acc_addresses:
if realname is not None:
break
regex = re.compile(re.escape(alias), flags=re.IGNORECASE)
for seen_name, seen_address in candidate_addresses:
if regex.match(seen_address):
logging.debug("match!: '%s' '%s'" % (seen_address, alias))
if settings.get(action + '_force_realname'):
realname = account.realname
else:
realname = seen_name
if settings.get(action + '_force_address'):
address = account.address
else:
address = seen_address
# revert to default account if nothing found
if realname is None:
account = my_accounts[0]
realname = account.realname
address = account.address
logging.debug('using realname: "%s"' % realname)
logging.debug('using address: %s' % address)
from_value = address if realname == '' else '%s <%s>' % (realname, address)
return from_value, account
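The address-matching step above boils down to: for each configured alias, build a case-insensitive regex and test it against every candidate (name, address) pair pulled from the relevant headers. A stripped-down, self-contained sketch of just that loop; the alias list and header values are invented examples, and getaddresses is imported from email.utils here rather than the Python 2 email.Utils used above:

import re
from email.utils import getaddresses

# Simplified stand-ins: one configured alias and the raw To header values.
my_aliases = ['me@example.org']
header_values = ['Someone <someone@example.net>', 'Me <ME@Example.org>']

candidates = getaddresses(header_values)

match = None
for alias in my_aliases:
    # Same idea as above: escape the alias and compare case-insensitively.
    regex = re.compile(re.escape(alias), flags=re.IGNORECASE)
    for seen_name, seen_address in candidates:
        if regex.match(seen_address):
            match = (seen_name, seen_address)
            break
    if match:
        break

print(match)  # ('Me', 'ME@Example.org') -- the alias matched despite the case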
@registerCommand(MODE, 'reply', arguments=[
(['--all'], {'action': 'store_true', 'help': 'reply to all'}),
(['--spawn'], {'action': BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class ReplyCommand(Command):
"""reply to message"""
repeatable = True
def __init__(self, message=None, all=False, spawn=None, **kwargs):
"""
:param message: message to reply to (defaults to selected message)
:type message: `alot.db.message.Message`
:param all: group reply; copies recipients from Bcc/Cc/To to the reply
:type all: bool
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.groupreply = all
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
# get message to forward if not given in constructor
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# set body text
name, address = self.message.get_author()
timestamp = self.message.get_date()
qf = settings.get_hook('reply_prefix')
if qf:
quotestring = qf(name, address, timestamp, ui=ui, dbm=ui.dbman)
else:
quotestring = 'Quoting %s (%s)\n' % (name or address, timestamp)
mailcontent = quotestring
quotehook = settings.get_hook('text_quote')
if quotehook:
mailcontent += quotehook(self.message.accumulate_body())
else:
quote_prefix = settings.get('quote_prefix')
for line in self.message.accumulate_body().splitlines():
mailcontent += quote_prefix + line + '\n'
envelope = Envelope(bodytext=mailcontent)
# copy subject
subject = decode_header(mail.get('Subject', ''))
reply_subject_hook = settings.get_hook('reply_subject')
if reply_subject_hook:
subject = reply_subject_hook(subject)
else:
rsp = settings.get('reply_subject_prefix')
if not subject.lower().startswith(('re:', rsp.lower())):
subject = rsp + subject
envelope.add('Subject', subject)
# set From-header and sending account
try:
from_header, account = determine_sender(mail, 'reply')
except AssertionError as e:
ui.notify(e.message, priority='error')
return
envelope.add('From', from_header)
# set To
sender = mail['Reply-To'] or mail['From']
my_addresses = settings.get_addresses()
sender_address = parseaddr(sender)[1]
cc = ''
# check if reply is to self sent message
if sender_address in my_addresses:
recipients = [mail['To']]
emsg = 'Replying to own message, set recipients to: %s' \
% recipients
logging.debug(emsg)
else:
recipients = [sender]
if self.groupreply:
# make sure that our own address is not included
# if the message was self-sent, then our address is not included
MFT = mail.get_all('Mail-Followup-To', [])
followupto = self.clear_my_address(my_addresses, MFT)
if followupto and settings.get('honor_followup_to'):
logging.debug('honor followup to: %s', followupto)
recipients = [followupto]
# since Mail-Followup-To was set, ignore the Cc header
else:
if sender != mail['From']:
recipients.append(mail['From'])
# append To addresses if not replying to self sent message
if sender_address not in my_addresses:
cleared = self.clear_my_address(
my_addresses, mail.get_all('To', []))
recipients.append(cleared)
# copy cc for group-replies
if 'Cc' in mail:
cc = self.clear_my_address(
my_addresses, mail.get_all('Cc', []))
envelope.add('Cc', decode_header(cc))
to = ', '.join(recipients)
logging.debug('reply to: %s' % to)
envelope.add('To', decode_header(to))
# if any of the recipients is a mailinglist that we are subscribed to,
# set Mail-Followup-To header so that duplicates are avoided
if settings.get('followup_to'):
# to and cc are already cleared of our own address
allrecipients = [to] + [cc]
lists = settings.get('mailinglists')
# check if any recipient address matches a known mailing list
if any([addr in lists for n, addr in getaddresses(allrecipients)]):
followupto = ', '.join(allrecipients)
logging.debug('mail followup to: %s' % followupto)
envelope.add('Mail-Followup-To', decode_header(followupto))
# set In-Reply-To header
envelope.add('In-Reply-To', '<%s>' % self.message.get_message_id())
# set References header
old_references = mail.get('References', '')
if old_references:
old_references = old_references.split()
references = old_references[-8:]
if len(old_references) > 8:
references = old_references[:1] + references
references.append('<%s>' % self.message.get_message_id())
envelope.add('References', ' '.join(references))
else:
envelope.add('References', '<%s>' % self.message.get_message_id())
# continue to compose
ui.apply_command(ComposeCommand(envelope=envelope,
spawn=self.force_spawn))
def clear_my_address(self, my_addresses, value):
"""return recipient header without the addresses in my_addresses"""
new_value = []
for name, address in getaddresses(value):
if address not in my_addresses:
if name != '':
new_value.append('"%s" <%s>' % (name, address))
else:
new_value.append(address)
return ', '.join(new_value)
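The References handling in apply() keeps the thread's very first message-id plus the most recent ones (capped so the header stays short), then appends the id of the message being replied to. A standalone restatement of that capping logic, as a hedged sketch rather than alot's own helper:

def build_references(old_references, new_msgid, keep=8):
    # Keep at most `keep` recent ids, re-adding the very first id when the
    # chain is longer than that, then append the id we are replying to.
    refs = old_references.split()
    if refs:
        kept = refs[-keep:]
        if len(refs) > keep:
            kept = refs[:1] + kept
        kept.append('<%s>' % new_msgid)
        return ' '.join(kept)
    return '<%s>' % new_msgid

old = ' '.join('<m%d@example.org>' % i for i in range(12))
print(build_references(old, 'reply@example.org'))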
@registerCommand(MODE, 'forward', arguments=[
(['--attach'], {'action': 'store_true', 'help': 'attach original mail'}),
(['--spawn'], {'action': BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class ForwardCommand(Command):
"""forward message"""
repeatable = True
def __init__(self, message=None, attach=True, spawn=None, **kwargs):
"""
:param message: message to forward (defaults to selected message)
:type message: `alot.db.message.Message`
:param attach: attach original mail instead of inline quoting its body
:type attach: bool
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.inline = not attach
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
# get message to forward if not given in constructor
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
envelope = Envelope()
if self.inline: # inline mode
# set body text
name, address = self.message.get_author()
timestamp = self.message.get_date()
qf = settings.get_hook('forward_prefix')
if qf:
quote = qf(name, address, timestamp, ui=ui, dbm=ui.dbman)
else:
quote = 'Forwarded message from %s (%s):\n' % (
name or address, timestamp)
mailcontent = quote
quotehook = settings.get_hook('text_quote')
if quotehook:
mailcontent += quotehook(self.message.accumulate_body())
else:
quote_prefix = settings.get('quote_prefix')
for line in self.message.accumulate_body().splitlines():
mailcontent += quote_prefix + line + '\n'
envelope.body = mailcontent
for a in self.message.get_attachments():
envelope.attach(a)
else: # attach original mode
# attach original msg
original_mail = Message()
original_mail.set_type('message/rfc822')
original_mail['Content-Disposition'] = 'attachment'
original_mail.set_payload(email_as_string(mail))
envelope.attach(Attachment(original_mail))
# copy subject
subject = decode_header(mail.get('Subject', ''))
subject = 'Fwd: ' + subject
forward_subject_hook = settings.get_hook('forward_subject')
if forward_subject_hook:
subject = forward_subject_hook(subject)
else:
fsp = settings.get('forward_subject_prefix')
if not subject.startswith(('Fwd:', fsp)):
subject = fsp + subject
envelope.add('Subject', subject)
# set From-header and sending account
try:
from_header, account = determine_sender(mail, 'reply')
except AssertionError as e:
ui.notify(e.message, priority='error')
return
envelope.add('From', from_header)
# continue to compose
ui.apply_command(ComposeCommand(envelope=envelope,
spawn=self.force_spawn))
@registerCommand(MODE, 'bounce')
class BounceMailCommand(Command):
"""directly re-send selected message"""
repeatable = True
def __init__(self, message=None, **kwargs):
"""
:param message: message to bounce (defaults to selected message)
:type message: `alot.db.message.Message`
"""
self.message = message
Command.__init__(self, **kwargs)
@inlineCallbacks
def apply(self, ui):
# get mail to bounce
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# look if this makes sense: do we have any accounts set up?
my_accounts = settings.get_accounts()
if not my_accounts:
ui.notify('no accounts set', priority='error')
return
# remove "Resent-*" headers if already present
del mail['Resent-From']
del mail['Resent-To']
del mail['Resent-Cc']
del mail['Resent-Date']
del mail['Resent-Message-ID']
# set Resent-From-header and sending account
try:
resent_from_header, account = determine_sender(mail, 'bounce')
except AssertionError as e:
ui.notify(e.message, priority='error')
return
mail['Resent-From'] = resent_from_header
# set Reset-To
allbooks = not settings.get('complete_matching_abook_only')
logging.debug('allbooks: %s', allbooks)
if account is not None:
abooks = settings.get_addressbooks(order=[account],
append_remaining=allbooks)
logging.debug(abooks)
completer = ContactsCompleter(abooks)
else:
completer = None
to = yield ui.prompt('To', completer=completer)
if to is None:
raise CommandCanceled()
mail['Resent-To'] = to.strip(' \t\n,')
logging.debug("bouncing mail")
logging.debug(mail.__class__)
ui.apply_command(SendCommand(mail=mail))
@registerCommand(MODE, 'editnew', arguments=[
(['--spawn'], {'action': BooleanAction, 'default': None,
'help': 'open editor in new window'})])
class EditNewCommand(Command):
"""edit message in as new"""
def __init__(self, message=None, spawn=None, **kwargs):
"""
:param message: message to reply to (defaults to selected message)
:type message: `alot.db.message.Message`
:param spawn: force spawning of editor in a new terminal
:type spawn: bool
"""
self.message = message
self.force_spawn = spawn
Command.__init__(self, **kwargs)
def apply(self, ui):
if not self.message:
self.message = ui.current_buffer.get_selected_message()
mail = self.message.get_email()
# set body text
name, address = self.message.get_author()
mailcontent = self.message.accumulate_body()
envelope = Envelope(bodytext=mailcontent)
# copy selected headers
to_copy = ['Subject', 'From', 'To', 'Cc', 'Bcc', 'In-Reply-To',
'References']
for key in to_copy:
value = decode_header(mail.get(key, ''))
if value:
envelope.add(key, value)
# copy attachments
for b in self.message.get_attachments():
envelope.attach(b)
ui.apply_command(ComposeCommand(envelope=envelope,
spawn=self.force_spawn,
omit_signature=True))
@registerCommand(MODE, 'fold', forced={'visible': False}, arguments=[
(
['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'}),
],
help='fold message(s)')
@registerCommand(MODE, 'unfold', forced={'visible': True}, arguments=[
(['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'}),
], help='unfold message(s)')
@registerCommand(MODE, 'togglesource', forced={'raw': 'toggle'}, arguments=[
(['query'], {'help': 'query used to filter messages to affect',
'nargs': '*'}),
], help='display message source')
@registerCommand(MODE, 'toggleheaders', forced={'all_headers': 'toggle'},
arguments=[
(['query'], {
'help': 'query used to filter messages to affect',
'nargs': '*'}),
],
help='display all headers')
class ChangeDisplaymodeCommand(Command):
"""fold or unfold messages"""
repeatable = True
def __init__(self, query=None, visible=None, raw=None, all_headers=None,
**kwargs):
"""
:param query: notmuch query string used to filter messages to affect
:type query: str
:param visible: unfold if `True`, fold if `False`, ignore if `None`
:type visible: True, False, 'toggle' or None
:param raw: display raw message text.
:type raw: True, False, 'toggle' or None
:param all_headers: show all headers (only visible if not in raw mode)
:type all_headers: True, False, 'toggle' or None
"""
self.query = None
if query:
self.query = ' '.join(query)
self.visible = visible
self.raw = raw
self.all_headers = all_headers
Command.__init__(self, **kwargs)
def apply(self, ui):
tbuffer = ui.current_buffer
logging.debug('matching lines %s...' % (self.query))
if self.query is None:
messagetrees = [tbuffer.get_selected_messagetree()]
else:
messagetrees = tbuffer.messagetrees()
if self.query != '*':
def matches(msgt):
msg = msgt.get_message()
return msg.matches(self.query)
messagetrees = filter(matches, messagetrees)
for mt in messagetrees:
# determine new display values for this message
if self.visible == 'toggle':
visible = mt.is_collapsed(mt.root)
else:
visible = self.visible
if self.raw == 'toggle':
tbuffer.focus_selected_message()
raw = not mt.display_source if self.raw == 'toggle' else self.raw
all_headers = not mt.display_all_headers \
if self.all_headers == 'toggle' else self.all_headers
# collapse/expand depending on new 'visible' value
if visible is False:
mt.collapse(mt.root)
elif visible is True: # could be None
mt.expand(mt.root)
tbuffer.focus_selected_message()
# set new values in messagetree obj
if raw is not None:
mt.display_source = raw
if all_headers is not None:
mt.display_all_headers = all_headers
mt.debug()
# let the messagetree reassemble itself
mt.reassemble()
# refresh the buffer (clears Tree caches etc)
tbuffer.refresh()
@registerCommand(MODE, 'pipeto', arguments=[
(['cmd'], {'help': 'shellcommand to pipe to', 'nargs': '+'}),
(['--all'], {'action': 'store_true', 'help': 'pass all messages'}),
(['--format'], {'help': 'output format', 'default': 'raw',
'choices': ['raw', 'decoded', 'id', 'filepath']}),
(['--separately'], {'action': 'store_true',
'help': 'call command once for each message'}),
(['--background'], {'action': 'store_true',
'help': 'don\'t stop the interface'}),
(['--add_tags'], {'action': 'store_true',
'help': 'add \'Tags\' header to the message'}),
(['--shell'], {'action': 'store_true',
'help': 'let the shell interpret the command'}),
(['--notify_stdout'], {'action': 'store_true',
'help': 'display cmd\'s stdout as notification'}),
],
)
class PipeCommand(Command):
"""pipe message(s) to stdin of a shellcommand"""
repeatable = True
def __init__(self, cmd, all=False, separately=False, background=False,
shell=False, notify_stdout=False, format='raw',
add_tags=False, noop_msg='no command specified',
confirm_msg='', done_msg=None, **kwargs):
"""
:param cmd: shellcommand to open
:type cmd: str or list of str
:param all: pipe all, not only selected message
:type all: bool
:param separately: call command once per message
:type separately: bool
:param background: do not suspend the interface
:type background: bool
:param notify_stdout: display command\'s stdout as notification message
:type notify_stdout: bool
:param shell: let the shell interpret the command
:type shell: bool
        :param format: what of the message(s) to pipe to the command's stdin:
                       'raw': message content as is,
                       'decoded': message content, decoded quoted printable,
                       'id': message ids, separated by newlines,
                       'filepath': paths to message files on disk
:type format: str
:param add_tags: add 'Tags' header to the message
:type add_tags: bool
:param noop_msg: error notification to show if `cmd` is empty
:type noop_msg: str
:param confirm_msg: confirmation question to ask (continues directly if
unset)
:type confirm_msg: str
:param done_msg: notification message to show upon success
:type done_msg: str
"""
Command.__init__(self, **kwargs)
if isinstance(cmd, unicode):
cmd = split_commandstring(cmd)
self.cmd = cmd
self.whole_thread = all
self.separately = separately
self.background = background
self.shell = shell
self.notify_stdout = notify_stdout
self.output_format = format
self.add_tags = add_tags
self.noop_msg = noop_msg
self.confirm_msg = confirm_msg
self.done_msg = done_msg
@inlineCallbacks
def apply(self, ui):
# abort if command unset
if not self.cmd:
ui.notify(self.noop_msg, priority='error')
return
# get messages to pipe
if self.whole_thread:
thread = ui.current_buffer.get_selected_thread()
if not thread:
return
to_print = thread.get_messages().keys()
else:
to_print = [ui.current_buffer.get_selected_message()]
# ask for confirmation if needed
if self.confirm_msg:
if (yield ui.choice(self.confirm_msg, select='yes',
cancel='no')) == 'no':
return
# prepare message sources
pipestrings = []
separator = '\n\n'
logging.debug('PIPETO format')
logging.debug(self.output_format)
if self.output_format == 'id':
pipestrings = [e.get_message_id() for e in to_print]
separator = '\n'
elif self.output_format == 'filepath':
pipestrings = [e.get_filename() for e in to_print]
separator = '\n'
else:
for msg in to_print:
mail = msg.get_email()
if self.add_tags:
mail['Tags'] = encode_header('Tags',
', '.join(msg.get_tags()))
if self.output_format == 'raw':
pipestrings.append(mail.as_string())
elif self.output_format == 'decoded':
headertext = extract_headers(mail)
bodytext = extract_body(mail)
msgtext = '%s\n\n%s' % (headertext, bodytext)
pipestrings.append(msgtext.encode('utf-8'))
if not self.separately:
pipestrings = [separator.join(pipestrings)]
if self.shell:
self.cmd = [' '.join(self.cmd)]
        # do the actual piping
for mail in pipestrings:
if self.background:
logging.debug('call in background: %s' % str(self.cmd))
proc = subprocess.Popen(self.cmd,
shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(mail)
if self.notify_stdout:
ui.notify(out)
else:
logging.debug('stop urwid screen')
ui.mainloop.screen.stop()
logging.debug('call: %s' % str(self.cmd))
# if proc.stdout is defined later calls to communicate
# seem to be non-blocking!
proc = subprocess.Popen(self.cmd, shell=True,
stdin=subprocess.PIPE,
# stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(mail)
logging.debug('start urwid screen')
ui.mainloop.screen.start()
if err:
ui.notify(err, priority='error')
return
# display 'done' message
if self.done_msg:
ui.notify(self.done_msg)
@registerCommand(MODE, 'remove', arguments=[
(['--all'], {'action': 'store_true', 'help': 'remove whole thread'})])
class RemoveCommand(Command):
"""remove message(s) from the index"""
repeatable = True
def __init__(self, all=False, **kwargs):
"""
:param all: remove all messages from thread, not just selected one
:type all: bool
"""
Command.__init__(self, **kwargs)
self.all = all
@inlineCallbacks
def apply(self, ui):
threadbuffer = ui.current_buffer
# get messages and notification strings
if self.all:
thread = threadbuffer.get_selected_thread()
tid = thread.get_thread_id()
messages = thread.get_messages().keys()
confirm_msg = 'remove all messages in thread?'
ok_msg = 'removed all messages in thread: %s' % tid
else:
msg = threadbuffer.get_selected_message()
messages = [msg]
confirm_msg = 'remove selected message?'
ok_msg = 'removed message: %s' % msg.get_message_id()
# ask for confirmation
if (yield ui.choice(confirm_msg, select='yes', cancel='no')) == 'no':
return
# notify callback
def callback():
threadbuffer.rebuild()
ui.notify(ok_msg)
# remove messages
for m in messages:
ui.dbman.remove_message(m, afterwards=callback)
ui.apply_command(FlushCommand())
@registerCommand(MODE, 'print', arguments=[
(['--all'], {'action': 'store_true', 'help': 'print all messages'}),
(['--raw'], {'action': 'store_true', 'help': 'pass raw mail string'}),
(['--separately'], {'action': 'store_true',
'help': 'call print command once for each message'}),
(['--add_tags'], {'action': 'store_true',
'help': 'add \'Tags\' header to the message'}),
],
)
class PrintCommand(PipeCommand):
"""print message(s)"""
repeatable = True
def __init__(self, all=False, separately=False, raw=False, add_tags=False,
**kwargs):
"""
:param all: print all, not only selected messages
:type all: bool
:param separately: call print command once per message
:type separately: bool
:param raw: pipe raw message string to print command
:type raw: bool
:param add_tags: add 'Tags' header to the message
:type add_tags: bool
"""
# get print command
cmd = settings.get('print_cmd') or ''
# set up notification strings
if all:
confirm_msg = 'print all messages in thread?'
ok_msg = 'printed thread using %s' % cmd
else:
confirm_msg = 'print selected message?'
ok_msg = 'printed message using %s' % cmd
# no print cmd set
noop_msg = 'no print command specified. Set "print_cmd" in the '\
'global section.'
PipeCommand.__init__(self, [cmd], all=all, separately=separately,
background=True,
shell=False,
format='raw' if raw else 'decoded',
add_tags=add_tags,
noop_msg=noop_msg, confirm_msg=confirm_msg,
done_msg=ok_msg, **kwargs)
@registerCommand(MODE, 'save', arguments=[
(['--all'], {'action': 'store_true', 'help': 'save all attachments'}),
(['path'], {'nargs': '?', 'help': 'path to save to'})])
class SaveAttachmentCommand(Command):
"""save attachment(s)"""
def __init__(self, all=False, path=None, **kwargs):
"""
:param all: save all, not only selected attachment
:type all: bool
:param path: path to write to. if `all` is set, this must be a
directory.
:type path: str
"""
Command.__init__(self, **kwargs)
self.all = all
self.path = path
@inlineCallbacks
def apply(self, ui):
pcomplete = completion.PathCompleter()
savedir = settings.get('attachment_prefix', '~')
if self.all:
msg = ui.current_buffer.get_selected_message()
if not self.path:
self.path = yield ui.prompt('save attachments to',
text=os.path.join(savedir, ''),
completer=pcomplete)
if self.path:
if os.path.isdir(os.path.expanduser(self.path)):
for a in msg.get_attachments():
dest = a.save(self.path)
name = a.get_filename()
if name:
ui.notify('saved %s as: %s' % (name, dest))
else:
ui.notify('saved attachment as: %s' % dest)
else:
ui.notify('not a directory: %s' % self.path,
priority='error')
else:
raise CommandCanceled()
else: # save focussed attachment
focus = ui.get_deep_focus()
if isinstance(focus, AttachmentWidget):
attachment = focus.get_attachment()
filename = attachment.get_filename()
if not self.path:
msg = 'save attachment (%s) to ' % filename
initialtext = os.path.join(savedir, filename)
self.path = yield ui.prompt(msg,
completer=pcomplete,
text=initialtext)
if self.path:
try:
dest = attachment.save(self.path)
ui.notify('saved attachment as: %s' % dest)
except (IOError, OSError) as e:
ui.notify(str(e), priority='error')
else:
raise CommandCanceled()
class OpenAttachmentCommand(Command):
"""displays an attachment according to mailcap"""
def __init__(self, attachment, **kwargs):
"""
:param attachment: attachment to open
:type attachment: :class:`~alot.db.attachment.Attachment`
"""
Command.__init__(self, **kwargs)
self.attachment = attachment
def apply(self, ui):
logging.info('open attachment')
mimetype = self.attachment.get_content_type()
# returns pair of preliminary command string and entry dict containing
# more info. We only use the dict and construct the command ourselves
_, entry = settings.mailcap_find_match(mimetype)
if entry:
afterwards = None # callback, will rm tempfile if used
handler_stdin = None
tempfile_name = None
handler_raw_commandstring = entry['view']
# read parameter
part = self.attachment.get_mime_representation()
parms = tuple(map('='.join, part.get_params()))
            # in case the mailcap-defined command contains no '%s',
            # we pipe the file's content to the handling command via stdin
if '%s' in handler_raw_commandstring:
nametemplate = entry.get('nametemplate', '%s')
prefix, suffix = parse_mailcap_nametemplate(nametemplate)
tmpfile = tempfile.NamedTemporaryFile(delete=False,
prefix=prefix,
suffix=suffix)
tempfile_name = tmpfile.name
self.attachment.write(tmpfile)
tmpfile.close()
def afterwards():
os.unlink(tempfile_name)
else:
handler_stdin = StringIO()
self.attachment.write(handler_stdin)
# create handler command list
handler_cmd = mailcap.subst(handler_raw_commandstring, mimetype,
filename=tempfile_name, plist=parms)
handler_cmdlist = split_commandstring(handler_cmd)
# 'needsterminal' makes handler overtake the terminal
nt = entry.get('needsterminal', None)
overtakes = (nt is None)
ui.apply_command(ExternalCommand(handler_cmdlist,
stdin=handler_stdin,
on_success=afterwards,
thread=overtakes))
else:
ui.notify('unknown mime type')
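# Editor's sketch (not part of the original alot code): a minimal, standalone
# illustration of the mailcap lookup/substitution used above, with hypothetical
# values ('image/png', '/tmp/example.png').
def _mailcap_subst_example():
    """Illustrative only; assumes a mailcap entry such as 'image/png; feh %s'."""
    import mailcap
    caps = mailcap.getcaps()
    # findmatch returns (command, entry) for the best matching mailcap line,
    # with %s already substituted by the given filename
    cmd, entry = mailcap.findmatch(caps, 'image/png', filename='/tmp/example.png')
    return cmd, entry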
@registerCommand(MODE, 'move', help='move focus in current buffer',
arguments=[(['movement'], {
'nargs': argparse.REMAINDER,
'help': 'up, down, page up, '
'page down, first, last'})])
class MoveFocusCommand(MoveCommand):
def apply(self, ui):
logging.debug(self.movement)
tbuffer = ui.current_buffer
if self.movement == 'parent':
tbuffer.focus_parent()
elif self.movement == 'first reply':
tbuffer.focus_first_reply()
elif self.movement == 'last reply':
tbuffer.focus_last_reply()
elif self.movement == 'next sibling':
tbuffer.focus_next_sibling()
elif self.movement == 'previous sibling':
tbuffer.focus_prev_sibling()
elif self.movement == 'next':
tbuffer.focus_next()
elif self.movement == 'previous':
tbuffer.focus_prev()
elif self.movement == 'next unfolded':
tbuffer.focus_next_unfolded()
elif self.movement == 'previous unfolded':
tbuffer.focus_prev_unfolded()
else:
MoveCommand.apply(self, ui)
# TODO add 'next matching' if threadbuffer stores the original query
# TODO: add next by date..
tbuffer.body.refresh()
@registerCommand(MODE, 'select')
class ThreadSelectCommand(Command):
"""select focussed element. The fired action depends on the focus:
- if message summary, this toggles visibility of the message,
- if attachment line, this opens the attachment"""
def apply(self, ui):
focus = ui.get_deep_focus()
if isinstance(focus, AttachmentWidget):
logging.info('open attachment')
ui.apply_command(OpenAttachmentCommand(focus.get_attachment()))
else:
ui.apply_command(ChangeDisplaymodeCommand(visible='toggle'))
@registerCommand(MODE, 'tag', forced={'action': 'add'}, arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='add tags to message(s)',
)
@registerCommand(MODE, 'retag', forced={'action': 'set'}, arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='set message(s) tags.',
)
@registerCommand(MODE, 'untag', forced={'action': 'remove'}, arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='remove tags from message(s)',
)
@registerCommand(MODE, 'toggletags', forced={'action': 'toggle'}, arguments=[
(['--all'], {'action': 'store_true',
'help': 'tag all messages in thread'}),
(['--no-flush'], {'action': 'store_false', 'dest': 'flush',
'help': 'postpone a writeout to the index'}),
(['tags'], {'help': 'comma separated list of tags'})],
help='flip presence of tags on message(s)',
)
class TagCommand(Command):
"""manipulate message tags"""
repeatable = True
def __init__(self, tags=u'', action='add', all=False, flush=True,
**kwargs):
"""
:param tags: comma separated list of tagstrings to set
:type tags: str
:param action: adds tags if 'add', removes them if 'remove', adds tags
and removes all other if 'set' or toggle individually if
'toggle'
:type action: str
:param all: tag all messages in thread
:type all: bool
        :param flush: immediately write out to the index
:type flush: bool
"""
self.tagsstring = tags
self.all = all
self.action = action
self.flush = flush
Command.__init__(self, **kwargs)
def apply(self, ui):
tbuffer = ui.current_buffer
if self.all:
messagetrees = tbuffer.messagetrees()
else:
messagetrees = [tbuffer.get_selected_messagetree()]
def refresh_widgets():
for mt in messagetrees:
mt.refresh()
# put currently selected message id on a block list for the
# auto-remove-unread feature. This makes sure that explicit
# tag-unread commands for the current message are not undone on the
# next keypress (triggering the autorm again)...
mid = tbuffer.get_selected_mid()
tbuffer._auto_unread_dont_touch_mids.add(mid)
tbuffer.refresh()
tags = filter(lambda x: x, self.tagsstring.split(','))
try:
for mt in messagetrees:
m = mt.get_message()
if self.action == 'add':
m.add_tags(tags, afterwards=refresh_widgets)
                elif self.action == 'set':
m.add_tags(tags, afterwards=refresh_widgets,
remove_rest=True)
elif self.action == 'remove':
m.remove_tags(tags, afterwards=refresh_widgets)
elif self.action == 'toggle':
to_remove = []
to_add = []
for t in tags:
if t in m.get_tags():
to_remove.append(t)
else:
to_add.append(t)
m.remove_tags(to_remove)
m.add_tags(to_add, afterwards=refresh_widgets)
except DatabaseROError:
ui.notify('index in read-only mode', priority='error')
return
# flush index
if self.flush:
ui.apply_command(FlushCommand())
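# Editor's sketch (not part of the original module): the tag partitioning that
# TagCommand's 'toggle' action performs, shown on plain data structures.
def _partition_toggle_tags(requested, current):
    """Split `requested` into (to_add, to_remove) relative to the `current` tags."""
    to_add = [t for t in requested if t not in current]
    to_remove = [t for t in requested if t in current]
    return to_add, to_remove
# e.g. _partition_toggle_tags(['unread', 'flagged'], {'unread'})
#      -> (['flagged'], ['unread'])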
|
tlevine/alot
|
alot/commands/thread.py
|
Python
|
gpl-3.0
| 42,235
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
from karesansui.lib.rest import Rest, auth
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, CHECK_CHAR
from karesansui.lib.utils import is_param, json_dumps
from karesansui.db.access.tag import findbyhost1guestall
class GuestTag(Rest):
@auth
def _GET(self, *param, **params):
host_id = self.chk_hostby1(param)
if host_id is None: return web.notfound()
tags = findbyhost1guestall(self.orm, host_id)
if not tags:
            self.logger.debug("No tags found.")
return web.notfound()
if self.is_part() is True:
self.view.tags = tags
machine_ids = {}
for tag in tags:
tag_id = str(tag.id)
machine_ids[tag_id] = []
for machine in tag.machine:
if not machine.is_deleted:
machine_ids[tag_id].append("tag_machine%s"% machine.id)
machine_ids[tag_id] = " ".join(machine_ids[tag_id])
self.view.machine_ids = machine_ids
return True
elif self.is_json() is True:
tags_json = []
for tag in tags:
tags_json.append(tag.get_json(self.me.languages))
self.view.tags = json_dumps(tags_json)
return True
else:
return web.nomethod()
urls = (
'/host/(\d+)/guest/tag/?(\.part|\.json)$', GuestTag,
)
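# Editor's sketch (not part of karesansui): how the URL pattern above dispatches;
# the optional '.part'/'.json' suffix selects the partial-HTML or JSON branch
# handled in GuestTag._GET via is_part()/is_json().
def _guesttag_url_example():
    import re
    pattern = re.compile(r'/host/(\d+)/guest/tag/?(\.part|\.json)$')
    assert pattern.match('/host/1/guest/tag.json').groups() == ('1', '.json')
    assert pattern.match('/host/1/guest/tag/.part').groups() == ('1', '.part')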
|
karesansui/karesansui
|
karesansui/gadget/guesttag.py
|
Python
|
mit
| 2,641
| 0.004165
|
# Copyright The IETF Trust 2008, All Rights Reserved
from django.conf.urls.defaults import patterns, include
from ietf.wginfo import views, edit, milestones
from django.views.generic.simple import redirect_to
urlpatterns = patterns('',
(r'^$', views.wg_dir),
(r'^summary.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-area.txt', redirect_to, { 'url':'/wg/1wg-summary.txt' }),
(r'^summary-by-acronym.txt', redirect_to, { 'url':'/wg/1wg-summary-by-acronym.txt' }),
(r'^1wg-summary.txt', views.wg_summary_area),
(r'^1wg-summary-by-acronym.txt', views.wg_summary_acronym),
(r'^1wg-charters.txt', views.wg_charters),
(r'^1wg-charters-by-acronym.txt', views.wg_charters_by_acronym),
(r'^chartering/$', views.chartering_wgs),
(r'^bofs/$', views.bofs),
(r'^chartering/create/$', edit.edit, {'action': "charter"}, "wg_create"),
(r'^bofs/create/$', edit.edit, {'action': "create"}, "bof_create"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/documents/txt/$', views.wg_documents_txt),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/$', views.wg_documents_html, None, "wg_docs"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/charter/$', views.wg_charter, None, 'wg_charter'),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/init-charter/', edit.submit_initial_charter, None, "wg_init_charter"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/history/$', views.history),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/edit/$', edit.edit, {'action': "edit"}, "wg_edit"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/conclude/$', edit.conclude, None, "wg_conclude"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/$', milestones.edit_milestones, {'milestone_set': "current"}, "wg_edit_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/$', milestones.edit_milestones, {'milestone_set': "charter"}, "wg_edit_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/milestones/charter/reset/$', milestones.reset_charter_milestones, None, "wg_reset_charter_milestones"),
(r'^(?P<acronym>[a-zA-Z0-9-]+)/ajax/searchdocs/$', milestones.ajax_search_docs, None, "wg_ajax_search_docs"),
(r'^(?P<acronym>[^/]+)/management/', include('ietf.wgchairs.urls')),
)
|
mcr/ietfdb
|
ietf/wginfo/urls.py
|
Python
|
bsd-3-clause
| 2,177
| 0.009187
|
"""Admin API urls."""
from rest_framework import routers
from . import viewsets
router = routers.SimpleRouter()
router.register(r"domains", viewsets.DomainViewSet, basename="domain")
router.register(
r"domainaliases", viewsets.DomainAliasViewSet, basename="domain_alias")
router.register(r"accounts", viewsets.AccountViewSet, basename="account")
router.register(r"aliases", viewsets.AliasViewSet, basename="alias")
router.register(
r"senderaddresses", viewsets.SenderAddressViewSet, basename="sender_address")
urlpatterns = router.urls
|
modoboa/modoboa
|
modoboa/admin/api/v1/urls.py
|
Python
|
isc
| 548
| 0.001825
|
#
# Copyright (C) 2012-2014, Quarkslab.
#
# This file is part of qb-sync.
#
# qb-sync is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import socket
import select
import base64
import binascii
import re
import ConfigParser
import traceback
HOST = 'localhost'
PORT = 9100
try:
import json
except:
print "[-] failed to import json\n%s" % repr(sys.exc_info())
sys.exit(0)
class Client():
def __init__(self, s_client, s_srv, name):
self.client_sock = s_client
self.srv_sock = s_srv
self.name = name
self.enabled = False
self.buffer = ''
def close(self):
self.enabled = False
if self.client_sock:
self.client_sock.close()
if self.srv_sock:
self.srv_sock.close()
def feed(self, data):
batch = []
self.buffer = ''.join([self.buffer, data])
if self.buffer.endswith("\n"):
batch = [req for req in self.buffer.strip().split('\n') if req != '']
self.buffer = ''
return batch
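# Editor's sketch (not part of qb-sync): how Client.feed() batches
# newline-terminated requests while buffering an incomplete trailing one.
def _feed_example():
    c = Client(None, None, 'demo')
    assert c.feed('{"type": "cmd"') == []                # incomplete, kept in buffer
    assert c.feed(', "cmd": "bt"}\n') == ['{"type": "cmd", "cmd": "bt"}']
    assert c.buffer == ''                                # buffer drained after a full line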
class DispatcherSrv():
def __init__(self):
self.idb_clients = []
self.dbg_client = None
self.srv_socks = []
self.opened_socks = []
self.current_dbg = None
self.current_dialect = 'unknown'
self.current_idb = None
self.current_module = None
self.sync_mode_auto = True
self.pat = re.compile('dbg disconnected')
self.req_handlers = {
'new_client': self.req_new_client,
'new_dbg': self.req_new_dbg,
'dbg_quit': self.req_dbg_quit,
'idb_n': self.req_idb_n,
'idb_list': self.req_idb_list,
'module': self.req_module,
'sync_mode': self.req_sync_mode,
'cmd': self.req_cmd,
'bc': self.req_bc,
'kill': self.req_kill
}
def bind(self, host, port):
self.dbg_srv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dbg_srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.dbg_srv_sock.bind((host, port))
self.srv_socks.append(self.dbg_srv_sock)
if not (socket.gethostbyname(host) == '127.0.0.1'):
self.localhost_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.localhost_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.localhost_sock.bind(('localhost', port))
self.srv_socks.append(self.localhost_sock)
def accept(self, s):
new_socket, addr = s.accept()
self.opened_socks.append(new_socket)
def listen(self):
for s in self.srv_socks:
s.listen(5)
def close(self, s):
s.close()
self.opened_socks.remove(s)
def loop(self):
self.listen()
self.announcement("dispatcher listening")
while True:
rlist, wlist, xlist = select.select(self.srv_socks + self.opened_socks, [], [])
if not rlist:
self.announcement("socket error: select")
raise Exception("rabbit eating the cable")
for s in rlist:
if s in self.srv_socks:
self.accept(s)
else:
self.handle(s)
def handle(self, s):
client = self.sock_to_client(s)
for req in self.recvall(client):
self.parse_exec(s, req)
# find client object for its srv socket
def sock_to_client(self, s):
if self.current_dbg and (s == self.current_dbg.srv_sock):
client = self.current_dbg
else:
clist = [client for client in self.idb_clients if (client.srv_sock == s)]
if not clist:
client = Client(None, s, None)
self.idb_clients.append(client)
else:
client = clist[0]
return client
# buffered readline like function
def recvall(self, client):
try:
data = client.srv_sock.recv(4096)
if data == '':
raise
except:
if client == self.current_dbg:
self.broadcast("debugger closed the connection")
self.dbg_quit()
else:
self.client_quit(client.srv_sock)
self.broadcast("a client quit, nb client(s) left: %d" % len(self.idb_clients))
return []
return client.feed(data)
# parse and execute requests from clients (idbs or dbg)
def parse_exec(self, s, req):
if not (req[0:8] == '[notice]'):
# this is a normal [sync] request from debugger, forward it
self.forward(req)
# receive 'dbg disconnected', socket can be closed
if re.search(self.pat, req):
self.close(s)
return
req = self.normalize(req, 8)
try:
hash = json.loads(req)
except:
print "[-] dispatcher failed to parse json\n %s\n" % req
return
type = hash['type']
if not type in self.req_handlers:
print ("[*] dispatcher unknown request: %s" % type)
return
req_handler = self.req_handlers[type]
req_handler(s, hash)
def normalize(self, req, taglen):
req = req[taglen:]
req = req.replace("\\", "\\\\")
req = req.replace("\n", "")
return req
def puts(self, msg, s):
s.sendall(msg)
# dispatcher announcements are forwarded to the idb
def announcement(self, msg, s=None):
if not s:
if not self.current_idb:
return
s = self.current_idb.client_sock
try:
s.sendall("[notice]{\"type\":\"dispatcher\",\"subtype\":\"msg\",\"msg\":\"%s\"}\n" % msg)
except:
return
# send message to all connected idb clients
def broadcast(self, msg):
for idbc in self.idb_clients:
self.announcement(msg, idbc.client_sock)
# send dbg message to currently active idb client
def forward(self, msg, s=None):
if not s:
if not self.current_idb:
return
s = self.current_idb.client_sock
if s:
s.sendall(msg + "\n")
# send dbg message to all idb clients
def forward_all(self, msg, s=None):
for idbc in self.idb_clients:
self.forward(msg, idbc.client_sock)
# disable current idb and enable new idb matched from current module name
def switch_idb(self, new_idb):
msg = "[sync]{\"type\":\"broker\",\"subtype\":\"%s\"}\n"
if (not self.current_idb == new_idb) & (self.current_idb.enabled):
self.current_idb.client_sock.sendall(msg % "disable_idb")
self.current_idb.enabled = False
if new_idb:
new_idb.client_sock.sendall(msg % "enable_idb")
self.current_idb = new_idb
new_idb.enabled = True
# a new idb client connects to the dispatcher via its broker
def req_new_client(self, srv_sock, hash):
port, name = hash['port'], hash['idb']
try:
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_sock.connect(('localhost', port))
self.opened_socks.append(client_sock)
except:
self.opened_socks.remove(srv_sock)
srv_sock.close()
return
# check if an idb client is already registered with the same name
conflicting = [client for client in self.idb_clients if (client.name == name)]
# promote to idb client
new_client = self.sock_to_client(srv_sock)
new_client.client_sock = client_sock
new_client.name = name
self.broadcast("add new client (listening on port %d), nb client(s): %d" % (port, len(self.idb_clients)))
if conflicting:
self.broadcast("conflicting name: %s !" % new_client.name)
if not self.current_idb:
self.current_idb = new_client
# if new client match current module name, then enable it
if self.current_module == name:
self.switch_idb(new_client)
# inform new client about debugger's dialect
self.dbg_dialect(new_client)
# clean state when a client is quiting
def client_quit(self, s):
self.opened_socks.remove(s)
# remove exiting client from the list of active clients
for idbc in [idbc for idbc in self.idb_clients if (idbc.srv_sock == s)]:
self.idb_clients.remove(idbc)
self.opened_socks.remove(idbc.client_sock)
idbc.close()
# no more clients, let's kill ourself
if not self.idb_clients:
for s in self.srv_socks:
s.close()
sys.exit()
# a new debugger client connects to the dispatcher
def req_new_dbg(self, s, hash):
msg = hash['msg']
if self.current_dbg:
self.dbg_quit()
# promote to dbg client
self.current_dbg = self.sock_to_client(s)
self.current_dbg.client_sock = s
self.idb_clients.remove(self.current_dbg)
self.broadcast("new debugger client: %s" % msg)
        # store dbg's dialect
if 'dialect' in hash:
self.current_dialect = hash['dialect']
self.dbg_dialect()
# inform client about debugger's dialect
def dbg_dialect(self, client=None):
msg = "[sync]{\"type\":\"dialect\",\"dialect\":\"%s\"}\n" % self.current_dialect
if client:
client.client_sock.sendall(msg)
else:
for idbc in self.idb_clients:
idbc.client_sock.sendall(msg)
# debugger client disconnect from the dispatcher
def req_dbg_quit(self, s, hash):
msg = hash['msg']
self.broadcast("debugger quit: %s" % msg)
self.dbg_quit()
# clean state when debugger is quiting
def dbg_quit(self):
self.opened_socks.remove(self.current_dbg.srv_sock)
self.current_dbg.close()
self.current_dbg = None
self.current_module = None
self.switch_idb(None)
self.current_dialect = 'unknown'
# handle kill notice from a client, exit properly if no more client
def req_kill(self, s, hash):
self.client_quit(s)
self.broadcast("received a kill notice from client, %d client(s) left" % len(self.idb_clients))
# send list of currently connected idb clients
def req_idb_list(self, s, hash):
clist = "> currently connected idb(s):\n"
if not self.idb_clients:
clist += " no idb client yet\n"
else:
for i in range(len(self.idb_clients)):
clist += (" [%d] %s\n" % (i, self.idb_clients[i].name))
s.sendall(clist)
# manually set current active idb to idb n from idb list
def req_idb_n(self, s, hash):
idb = hash['idb']
try:
idbn = int(idb)
except:
s.sendall("> n should be a decimal value")
return
try:
idbc = self.idb_clients[idbn]
except:
s.sendall("> %d is invalid (see idblist)" % idbn)
return
self.switch_idb(idbc)
s.sendall("> current idb set to %d" % idbn)
# dbg notice that its current module has changed
def req_module(self, s, hash):
modpath = hash['path']
self.current_module = modname = os.path.basename(modpath)
matching = [idbc for idbc in self.idb_clients if (idbc.name.lower() == modname.lower())]
if not self.sync_mode_auto:
self.broadcast("sync_mode_auto off")
return
if len(matching) == 1:
# matched is set as active
self.switch_idb(matching[0])
else:
if not len(matching):
msg = "mod request has no match for %s"
else:
msg = "ambiguous mod request, too many matches for %s"
self.broadcast(msg % modname)
            # no match: current idb (if existing) is disabled
if self.current_idb.enabled:
self.switch_idb(None)
# sync mode tells if idb switch is automatic or manual
def req_sync_mode(self, s, hash):
mode = hash['auto']
self.broadcast("sync mode auto set to %s" % mode)
self.sync_mode_auto = (mode == "on")
# bc request should be forwarded to all idbs
def req_bc(self, s, hash):
msg = "[sync]%s" % json.dumps(hash)
self.forward_all(msg)
def req_cmd(self, s, hash):
cmd = hash['cmd']
self.current_dbg.client_sock.sendall("%s\n" % cmd)
def err_log(msg):
fd = open("%s.err" % __file__, 'w')
fd.write(msg)
fd.close()
if __name__ == "__main__":
server = DispatcherSrv()
for loc in ['IDB_PATH', 'USERPROFILE', 'HOME']:
if loc in os.environ:
confpath = os.path.join(os.path.realpath(os.environ[loc]), '.sync')
if os.path.exists(confpath):
config = ConfigParser.SafeConfigParser({'host': HOST, 'port': PORT})
config.read(confpath)
HOST = config.get("INTERFACE", 'host')
PORT = config.getint("INTERFACE", 'port')
server.announcement("configuration file loaded")
break
try:
server.bind(HOST, PORT)
except Exception as e:
err_log("dispatcher failed to bind on %s:%s\n-> %s" % (HOST, PORT, repr(e)))
sys.exit()
try:
server.loop()
except Exception as e:
err_log("dispatcher failed\n-> %s" % repr(e))
server.announcement("dispatcher stop")
|
quarkslab/qb-sync
|
ext_ida/dispatcher.py
|
Python
|
gpl-3.0
| 14,364
| 0.00181
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMethods in the Ruby Koans
#
from runner.koan import *
def my_global_function(a, b):
return a + b
class AboutMethods(Koan):
def test_calling_a_global_function(self):
self.assertEqual(5, my_global_function(2, 3))
# NOTE: Wrong number of arguments is not a SYNTAX error, but a
# runtime error.
def test_calling_functions_with_wrong_number_of_arguments(self):
try:
my_global_function()
except Exception as exception:
# NOTE: The .__name__ attribute will convert the class
# into a string value.
self.assertEqual(exception.__class__.__name__,
exception.__class__.__name__)
self.assertMatch(
r'my_global_function\(\) takes exactly 2 arguments \(0 given\)',
exception[0])
try:
my_global_function(1, 2, 3)
except Exception as e:
# Note, watch out for parenthesis. They need slashes in front!
self.assertMatch(r'my_global_function\(\) takes exactly 2 arguments \(3 given\)', e[0])
# ------------------------------------------------------------------
def pointless_method(self, a, b):
sum = a + b
def test_which_does_not_return_anything(self):
self.assertEqual(None, self.pointless_method(1, 2))
# Notice that methods accessed from class scope do not require
# you to pass the first "self" argument?
# ------------------------------------------------------------------
def method_with_defaults(self, a, b='default_value'):
return [a, b]
def test_calling_with_default_values(self):
self.assertEqual([1, 'default_value'], self.method_with_defaults(1))
self.assertEqual([1, 2], self.method_with_defaults(1, 2))
# ------------------------------------------------------------------
def method_with_var_args(self, *args):
return args
def test_calling_with_variable_arguments(self):
self.assertEqual((), self.method_with_var_args())
self.assertEqual(('one', ), self.method_with_var_args('one'))
self.assertEqual(('one', 'two'), self.method_with_var_args('one', 'two'))
# ------------------------------------------------------------------
def function_with_the_same_name(self, a, b):
return a + b
def test_functions_without_self_arg_are_global_functions(self):
def function_with_the_same_name(a, b):
return a * b
self.assertEqual(12, function_with_the_same_name(3, 4))
def test_calling_methods_in_same_class_with_explicit_receiver(self):
def function_with_the_same_name(a, b):
return a * b
self.assertEqual(7, self.function_with_the_same_name(3, 4))
# ------------------------------------------------------------------
def another_method_with_the_same_name(self):
return 10
link_to_overlapped_method = another_method_with_the_same_name
def another_method_with_the_same_name(self):
return 42
def test_that_old_methods_are_hidden_by_redefinitions(self):
self.assertEqual(42, self.another_method_with_the_same_name())
def test_that_overlapped_method_is_still_there(self):
self.assertEqual(10, self.link_to_overlapped_method())
# ------------------------------------------------------------------
def empty_method(self):
pass
def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
self.assertEqual(None, self.empty_method())
def test_pass_does_nothing_at_all(self):
"You"
"shall"
"not"
pass
self.assertEqual(True, "Still got to this line" != None)
# ------------------------------------------------------------------
def one_line_method(self): return 'Madagascar'
def test_no_indentation_required_for_one_line_statement_bodies(self):
self.assertEqual('Madagascar', self.one_line_method())
# ------------------------------------------------------------------
def method_with_documentation(self):
"A string placed at the beginning of a function is used for documentation"
return "ok"
def test_the_documentation_can_be_viewed_with_the_doc_method(self):
self.assertMatch("A string placed at the beginning of a function is used for documentation", self.method_with_documentation.__doc__)
# ------------------------------------------------------------------
class Dog(object):
def name(self):
return "Fido"
def _tail(self):
# Prefixing a method with an underscore implies private scope
return "wagging"
def __password(self):
return 'password' # Genius!
def test_calling_methods_in_other_objects(self):
rover = self.Dog()
self.assertEqual('Fido', rover.name())
def test_private_access_is_implied_but_not_enforced(self):
rover = self.Dog()
# This is a little rude, but legal
self.assertEqual('wagging', rover._tail())
def test_double_underscore_attribute_prefixes_cause_name_mangling(self):
"""Attributes names that start with a double underscore get
mangled when an instance is created."""
rover = self.Dog()
try:
#This may not be possible...
password = rover.__password()
except Exception as ex:
self.assertEqual('AttributeError', ex.__class__.__name__)
# But this still is!
self.assertEqual('password', rover._Dog__password())
# Name mangling exists to avoid name clash issues when subclassing.
# It is not for providing effective access protection
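# Editor's sketch (not part of the original koans): name mangling outside a test
# class, to make the _ClassName__attr rewriting explicit.
class _MangledDemo(object):
    def __secret(self):
        return 'hidden'
# _MangledDemo()._MangledDemo__secret() returns 'hidden', while
# _MangledDemo().__secret() raises AttributeError outside the class body.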
|
Turivniy/Python_koans
|
python2/koans/about_methods.py
|
Python
|
mit
| 5,806
| 0.001206
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_permissions
short_description: "Module to manage permissions of users/groups in oVirt/RHV"
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage permissions of users/groups in oVirt/RHV"
options:
role:
description:
- "Name of the role to be assigned to user/group on specific object."
default: UserRole
state:
description:
- "Should the permission be present/absent."
choices: ['present', 'absent']
default: present
object_id:
description:
- "ID of the object where the permissions should be managed."
object_name:
description:
- "Name of the object where the permissions should be managed."
object_type:
description:
- "The object where the permissions should be managed."
default: 'vm'
choices: [
'data_center',
'cluster',
'host',
'storage_domain',
'network',
'disk',
'vm',
'vm_pool',
'template',
'cpu_profile',
'disk_profile',
'vnic_profile',
'system',
]
user_name:
description:
- "Username of the user to manage. In most LDAPs it's I(uid) of the user,
but in Active Directory you must specify I(UPN) of the user."
- "Note that if user don't exist in the system this module will fail,
you should ensure the user exists by using M(ovirt_users) module."
group_name:
description:
- "Name of the group to manage."
- "Note that if group don't exist in the system this module will fail,
you should ensure the group exists by using M(ovirt_groups) module."
authz_name:
description:
- "Authorization provider of the user/group. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
namespace:
description:
- "Namespace of the authorization provider, where user/group resides."
required: false
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add user user1 from authorization provider example.com-authz
- ovirt_permissions:
user_name: user1
authz_name: example.com-authz
object_type: vm
object_name: myvm
role: UserVmManager
# Remove permission from user
- ovirt_permissions:
state: absent
user_name: user1
authz_name: example.com-authz
object_type: cluster
object_name: mycluster
role: ClusterAdmin
'''
RETURN = '''
id:
description: ID of the permission which is managed
returned: On success if permission is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
permission:
description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/permission."
returned: On success if permission is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
follow_link,
get_link_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
)
def _objects_service(connection, object_type):
if object_type == 'system':
return connection.system_service()
return getattr(
connection.system_service(),
'%ss_service' % object_type,
None,
)()
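# Editor's sketch (illustrative, not part of the module): what the getattr
# dispatch above resolves to; oVirt SDK service names follow the
# '<object_type>s_service' convention, while 'system' short-circuits to the
# top-level system service.
def _objects_service_example(connection):
    vms = _objects_service(connection, 'vm')            # connection.system_service().vms_service()
    clusters = _objects_service(connection, 'cluster')  # connection.system_service().clusters_service()
    return vms, clusters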
def _object_service(connection, module):
object_type = module.params['object_type']
objects_service = _objects_service(connection, object_type)
if object_type == 'system':
return objects_service
object_id = module.params['object_id']
if object_id is None:
sdk_object = search_by_name(objects_service, module.params['object_name'])
if sdk_object is None:
raise Exception(
"'%s' object '%s' was not found." % (
module.params['object_type'],
module.params['object_name']
)
)
object_id = sdk_object.id
return objects_service.service(object_id)
def _permission(module, permissions_service, connection):
for permission in permissions_service.list():
user = follow_link(connection, permission.user)
if (
equal(module.params['user_name'], user.principal if user else None) and
equal(module.params['group_name'], get_link_name(connection, permission.group)) and
equal(module.params['role'], get_link_name(connection, permission.role))
):
return permission
class PermissionsModule(BaseModule):
def _user(self):
user = search_by_attributes(
self._connection.system_service().users_service(),
usrname="{name}@{authz_name}".format(
name=self._module.params['user_name'],
authz_name=self._module.params['authz_name'],
),
)
if user is None:
raise Exception("User '%s' was not found." % self._module.params['user_name'])
return user
def _group(self):
groups = self._connection.system_service().groups_service().list(
search="name={name}".format(
name=self._module.params['group_name'],
)
)
# If found more groups, filter them by namespace and authz name:
# (filtering here, as oVirt/RHV backend doesn't support it)
if len(groups) > 1:
groups = [
g for g in groups if (
equal(self._module.params['namespace'], g.namespace) and
equal(self._module.params['authz_name'], g.domain.name)
)
]
if not groups:
raise Exception("Group '%s' was not found." % self._module.params['group_name'])
return groups[0]
def build_entity(self):
entity = self._group() if self._module.params['group_name'] else self._user()
return otypes.Permission(
user=otypes.User(
id=entity.id
) if self._module.params['user_name'] else None,
group=otypes.Group(
id=entity.id
) if self._module.params['group_name'] else None,
role=otypes.Role(
name=self._module.params['role']
),
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
role=dict(default='UserRole'),
object_type=dict(
default='vm',
choices=[
'data_center',
'cluster',
'host',
'storage_domain',
'network',
'disk',
'vm',
'vm_pool',
'template',
'cpu_profile',
'disk_profile',
'vnic_profile',
'system',
]
),
authz_name=dict(required=True, aliases=['domain']),
object_id=dict(default=None),
object_name=dict(default=None),
        user_name=dict(default=None),
group_name=dict(default=None),
namespace=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
if (
(module.params['object_name'] is None and module.params['object_id'] is None)
and module.params['object_type'] != 'system'
):
module.fail_json(msg='"object_name" or "object_id" is required')
if module.params['user_name'] is None and module.params['group_name'] is None:
module.fail_json(msg='"user_name" or "group_name" is required')
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
permissions_service = _object_service(connection, module).permissions_service()
permissions_module = PermissionsModule(
connection=connection,
module=module,
service=permissions_service,
)
permission = _permission(module, permissions_service, connection)
state = module.params['state']
if state == 'present':
ret = permissions_module.create(entity=permission)
elif state == 'absent':
ret = permissions_module.remove(entity=permission)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
andreaso/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_permissions.py
|
Python
|
gpl-3.0
| 10,061
| 0.002286
|
#!/usr/bin/env python
# coding=utf-8
# code by kbdancer@92ez.com
from threading import Thread
from telnetlib import Telnet
import requests
import sqlite3
import queue
import time
import sys
import os
def ip2num(ip):
ip = [int(x) for x in ip.split('.')]
return ip[0] << 24 | ip[1] << 16 | ip[2] << 8 | ip[3]
def num2ip(num):
return '%s.%s.%s.%s' % ((num & 0xff000000) >> 24, (num & 0x00ff0000) >> 16, (num & 0x0000ff00) >> 8, num & 0x000000ff)
def ip_range(start, end):
return [num2ip(num) for num in range(ip2num(start), ip2num(end) + 1) if num & 0xff]
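# Editor's sketch (not part of the original scanner): how the helpers above
# round-trip between dotted quads and integers, and why ip_range() skips
# addresses ending in .0 (the `num & 0xff` filter drops them).
def _ip_helpers_example():
    assert ip2num('192.168.1.1') == 0xC0A80101
    assert num2ip(0xC0A80101) == '192.168.1.1'
    assert ip_range('192.168.1.254', '192.168.2.1') == [
        '192.168.1.254', '192.168.1.255', '192.168.2.1']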
class Database:
db = sys.path[0] + "/TPLINK_KEY.db"
charset = 'utf8'
def __init__(self):
self.connection = sqlite3.connect(self.db)
self.connection.text_factory = str
self.cursor = self.connection.cursor()
def insert(self, query, params):
try:
self.cursor.execute(query, params)
self.connection.commit()
except Exception as e:
print(e)
self.connection.rollback()
def update(self, query, params):
try:
self.cursor.execute(query, params)
self.connection.commit()
except Exception as e:
print(e)
self.connection.rollback()
def query(self, query, params):
cursor = self.connection.cursor()
cursor.execute(query, params)
return cursor.fetchall()
def __del__(self):
self.connection.close()
def b_thread(ip_address_list):
thread_list = []
queue_list = queue.Queue()
hosts = ip_address_list
for host in hosts:
queue_list.put(host)
for x in range(0, int(sys.argv[1])):
thread_list.append(tThread(queue_list))
for t in thread_list:
try:
t.daemon = True
t.start()
except Exception as e:
print(e)
for t in thread_list:
t.join()
class tThread(Thread):
def __init__(self, queue_obj):
Thread.__init__(self)
self.queue = queue_obj
def run(self):
while not self.queue.empty():
host = self.queue.get()
try:
get_info(host)
except Exception as e:
print(e)
continue
def get_position_by_ip(host):
try:
ip_url = "http://ip-api.com/json/{ip}?lang=zh-CN".format(ip=host)
header = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0"}
json_data = requests.get(url=ip_url, headers=header, timeout=10).json()
info = [json_data.get("country"), json_data.get('regionName'), json_data.get('city'), json_data.get('isp')]
return info
except Exception as e:
print(e)
def get_info(host):
username = "admin"
password = "admin"
telnet_timeout = 15
cmd_timeout = 5
try:
t = Telnet(host, timeout=telnet_timeout)
t.read_until("username:", cmd_timeout)
t.write(username + "\n")
t.read_until("password:", cmd_timeout)
t.write(password + "\n")
t.write("wlctl show\n")
t.read_until("SSID", cmd_timeout)
wifi_str = t.read_very_eager()
t.write("lan show info\n")
t.read_until("MACAddress", cmd_timeout)
lan_str = t.read_very_eager()
t.close()
if len(wifi_str) > 0:
# clear extra space
wifi_str = "".join(wifi_str.split())
# get SID KEY MAC
wifi_ssid = wifi_str[1:wifi_str.find('QSS')]
            wifi_key = wifi_str[wifi_str.find('Key=') + 4:wifi_str.find('cmd')] if wifi_str.find('Key=') != -1 else 'no password'
router_mac = lan_str[1:lan_str.find('__')].replace('\r\n', '')
current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
try:
my_sqlite_db = Database()
query_info = """select * from scanlog where ssid=? and key=? and mac=?"""
query_result = my_sqlite_db.query(query_info, [wifi_ssid, wifi_key, router_mac])
if len(query_result) < 1:
position_data = get_position_by_ip(host)
country = position_data[0]
province = position_data[1]
city = position_data[2]
isp = position_data[3]
insert_info = """INSERT INTO scanlog (`host`,`mac`,`ssid`,`wifikey`,`country`,`province`,`city`,`isp`) VALUES (?,?,?,?,?,?,?,?)"""
my_sqlite_db.insert(insert_info, [host, router_mac, wifi_ssid, wifi_key, country, province, city, isp])
print('[√] [%s] Info %s %s %s => Inserted!' % (current_time, host, wifi_ssid, wifi_key))
else:
print('[x] [%s] Found %s %s %s in DB, do nothing!' % (current_time, host, wifi_ssid, wifi_key))
except Exception as e:
print(e)
except Exception as e:
pass
if __name__ == '__main__':
print('==========================================')
print(' Scan TPLINK(MERCURY) wifi key by telnet')
print(' Author 92ez.com')
print('==========================================')
begin_ip = sys.argv[2].split('-')[0]
end_ip = sys.argv[2].split('-')[1]
ip_list = ip_range(begin_ip, end_ip)
current_pid = os.getpid()
print('\n[*] Total %d IP...' % len(ip_list))
print('\n================ Running =================')
try:
b_thread(ip_list)
except KeyboardInterrupt:
print('\n[*] Kill all thread.')
os.kill(current_pid, 9)
|
kbdancer/TPLINKKEY
|
scan.py
|
Python
|
mit
| 5,658
| 0.002301
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007,2008,2011,2012 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__all__ = [ "PY2", "getPortID", "getPortRange", "portStr", "getServiceName",
"checkIP", "checkIP6", "checkIPnMask", "checkIP6nMask",
"checkProtocol", "checkInterface", "checkUINT32",
"firewalld_is_active", "tempFile", "readfile", "writefile",
"enable_ip_forwarding", "get_nf_conntrack_helper_setting",
"set_nf_conntrack_helper_setting", "get_nf_conntrack_helpers",
"get_nf_nat_helpers", "check_port", "check_address",
"check_single_address", "check_mac", "uniqify", "ppid_of_pid",
"max_zone_name_len", "checkUser", "checkUid", "checkCommand",
"checkContext", "joinArgs", "splitArgs",
"b2u", "u2b", "u2b_if_py2" ]
import socket
import os
import os.path
import shlex
import pipes
import re
import string
import sys
import tempfile
from firewall.core.logger import log
from firewall.core.prog import runProg
from firewall.config import FIREWALLD_TEMPDIR, FIREWALLD_PIDFILE, COMMANDS
PY2 = sys.version < '3'
def getPortID(port):
""" Check and Get port id from port string or port id using socket.getservbyname
@param port port string or port id
@return Port id if valid, -1 if port can not be found and -2 if port is too big
"""
if isinstance(port, int):
_id = port
else:
if port:
port = port.strip()
try:
_id = int(port)
except ValueError:
try:
_id = socket.getservbyname(port)
except socket.error:
return -1
if _id > 65535:
return -2
return _id
def getPortRange(ports):
""" Get port range for port range string or single port id
@param ports an integer or port string or port range string
    @return Tuple with start and end port id for a valid range (a single-element
            tuple for a single port), -2 if a port is too big, -1 if a port can
            not be found or the range is invalid, or None if the range is ambiguous.
"""
# "<port-id>" case
if isinstance(ports, int) or ports.isdigit():
id1 = getPortID(ports)
if id1 >= 0:
return (id1,)
return id1
splits = ports.split("-")
# "<port-id>-<port-id>" case
if len(splits) == 2 and splits[0].isdigit() and splits[1].isdigit():
id1 = getPortID(splits[0])
id2 = getPortID(splits[1])
if id1 >= 0 and id2 >= 0:
if id1 < id2:
return (id1, id2)
elif id1 > id2:
return (id2, id1)
else: # ids are the same
return (id1,)
# everything else "<port-str>[-<port-str>]"
matched = [ ]
for i in range(len(splits), 0, -1):
id1 = getPortID("-".join(splits[:i]))
port2 = "-".join(splits[i:])
if len(port2) > 0:
id2 = getPortID(port2)
if id1 >= 0 and id2 >= 0:
if id1 < id2:
matched.append((id1, id2))
elif id1 > id2:
matched.append((id2, id1))
else:
matched.append((id1, ))
else:
if id1 >= 0:
matched.append((id1,))
if i == len(splits):
# full match, stop here
break
if len(matched) < 1:
return -1
elif len(matched) > 1:
return None
return matched[0]
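# Editor's sketch (illustrative, not part of firewalld): expected getPortRange()
# results for the cases described in the docstring above.
def _getPortRange_examples():
    assert getPortRange(80) == (80,)
    assert getPortRange("80-90") == (80, 90)
    assert getPortRange("90-80") == (80, 90)   # reordered low-high
    assert getPortRange("99999") == -2         # above 65535
    # an unresolvable service name such as "no-such-svc" yields -1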
def portStr(port, delimiter=":"):
""" Create port and port range string
@param port port or port range int or [int, int]
@param delimiter of the output string for port ranges, default ':'
@return Port or port range string, empty string if port isn't specified, None if port or port range is not valid
"""
if port == "":
return ""
_range = getPortRange(port)
if isinstance(_range, int) and _range < 0:
return None
elif len(_range) == 1:
return "%s" % _range
else:
return "%s%s%s" % (_range[0], delimiter, _range[1])
def portInPortRange(port, range):
_port = getPortID(port)
_range = getPortRange(range)
if len(_range) == 1:
return _port == getPortID(_range[0])
if len(_range) == 2 and \
_port >= getPortID(_range[0]) and _port <= getPortID(_range[1]):
return True
return False
def getServiceName(port, proto):
""" Check and Get service name from port and proto string combination using socket.getservbyport
@param port string or id
@param protocol string
@return Service name if port and protocol are valid, else None
"""
try:
name = socket.getservbyport(int(port), proto)
except socket.error:
return None
return name
def checkIP(ip):
""" Check IPv4 address.
@param ip address string
@return True if address is valid, else False
"""
try:
socket.inet_pton(socket.AF_INET, ip)
except socket.error:
return False
return True
def checkIP6(ip):
""" Check IPv6 address.
@param ip address string
@return True if address is valid, else False
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error:
return False
return True
def checkIPnMask(ip):
if "/" in ip:
addr = ip[:ip.index("/")]
mask = ip[ip.index("/")+1:]
if len(addr) < 1 or len(mask) < 1:
return False
else:
addr = ip
mask = None
if not checkIP(addr):
return False
if mask:
if "." in mask:
return checkIP(mask)
else:
try:
i = int(mask)
except ValueError:
return False
if i < 0 or i > 32:
return False
return True
def checkIP6nMask(ip):
if "/" in ip:
addr = ip[:ip.index("/")]
mask = ip[ip.index("/")+1:]
if len(addr) < 1 or len(mask) < 1:
return False
else:
addr = ip
mask = None
if not checkIP6(addr):
return False
if mask:
try:
i = int(mask)
except ValueError:
return False
if i < 0 or i > 128:
return False
return True
def checkProtocol(protocol):
try:
i = int(protocol)
except ValueError:
# string
try:
socket.getprotobyname(protocol)
except socket.error:
return False
else:
if i < 0 or i > 255:
return False
return True
def checkInterface(iface):
""" Check interface string
@param interface string
@return True if interface is valid (maximum 16 chars and does not contain ' ', '/', '!', ':', '*'), else False
"""
if not iface or len(iface) > 16:
return False
for ch in [ ' ', '/', '!', '*' ]:
# !:* are limits for iptables <= 1.4.5
if ch in iface:
return False
# disabled old iptables check
#if iface == "+":
# # limit for iptables <= 1.4.5
# return False
return True
def checkUINT32(val):
try:
x = int(val, 0)
except ValueError:
return False
else:
if x >= 0 and x <= 4294967295:
return True
return False
def firewalld_is_active():
""" Check if firewalld is active
@return True if there is a firewalld pid file and the pid is used by firewalld
"""
if not os.path.exists(FIREWALLD_PIDFILE):
return False
try:
with open(FIREWALLD_PIDFILE, "r") as fd:
pid = fd.readline()
except Exception:
return False
if not os.path.exists("/proc/%s" % pid):
return False
try:
with open("/proc/%s/cmdline" % pid, "r") as fd:
cmdline = fd.readline()
except Exception:
return False
if "firewalld" in cmdline:
return True
return False
def tempFile():
try:
if not os.path.exists(FIREWALLD_TEMPDIR):
os.mkdir(FIREWALLD_TEMPDIR, 0o750)
return tempfile.NamedTemporaryFile(mode='wt', prefix="temp.",
dir=FIREWALLD_TEMPDIR, delete=False)
except Exception as msg:
log.error("Failed to create temporary file: %s" % msg)
raise
return None
def readfile(filename):
try:
with open(filename, "r") as f:
return f.readlines()
except Exception as e:
log.error('Failed to read file "%s": %s' % (filename, e))
return None
def writefile(filename, line):
try:
with open(filename, "w") as f:
f.write(line)
except Exception as e:
log.error('Failed to write to file "%s": %s' % (filename, e))
return False
return True
def enable_ip_forwarding(ipv):
if ipv == "ipv4":
return writefile("/proc/sys/net/ipv4/ip_forward", "1\n")
elif ipv == "ipv6":
return writefile("/proc/sys/net/ipv6/conf/all/forwarding", "1\n")
return False
def get_modinfos(path_templates, prefix):
kver = os.uname()[2]
modules = []
for path in (t % kver for t in path_templates):
if os.path.isdir(path):
for filename in sorted(os.listdir(path)):
if filename.startswith(prefix):
modules.append(filename.split(".")[0])
if modules:
# Ignore status as it is not 0 if even one module had problems
(status, ret) = runProg(COMMANDS["modinfo"], modules)
entry = {}
for m in re.finditer(r"^(\w+):[ \t]*(\S.*?)[ \t]*$", ret, re.MULTILINE):
key, value = m.groups()
# Assume every entry starts with filename
if key == "filename" and "filename" in entry:
yield entry
entry = {}
entry.setdefault(key, [ ]).append(value)
if "filename" in entry:
yield entry
def get_nf_conntrack_helpers():
helpers = { }
for modinfo in get_modinfos(["/lib/modules/%s/kernel/net/netfilter/"], "nf_conntrack_"):
filename = modinfo['filename'][0].split("/")[-1]
name = filename.split(".")[0]
# If module name matches "nf_conntrack_proto_*"
        # then we add it to the helpers list and go to the next module
if filename.startswith("nf_conntrack_proto_"):
helper = name
helper = helper.replace("_", "-")
helper = helper.replace("nf-conntrack-", "")
helpers.setdefault(name, [ ]).append(helper)
continue
# Else we get module alias and if "-helper" in the "alias:" line of modinfo
# then we add it to helpers list and goto next module
if "alias" in modinfo:
for helper in modinfo["alias"]:
if "-helper-" in helper:
helper = helper.replace("nfct-helper-", "")
helper = helper.replace("_", "-")
helpers.setdefault(name, [ ]).append(helper)
return helpers
def get_nf_nat_helpers():
helpers = { }
for modinfo in get_modinfos(["/lib/modules/%s/kernel/net/netfilter/",
"/lib/modules/%s/kernel/net/ipv4/netfilter/",
"/lib/modules/%s/kernel/net/ipv6/netfilter/"], "nf_nat_"):
filename = modinfo['filename'][0].split("/")[-1]
name = filename.split(".")[0]
helper = name
helper = helper.replace("_", "-")
helper = helper.replace("nf-nat-", "")
# If module name matches "nf_nat_proto_*"
        # then we add it to the helpers list and go to the next module
if filename.startswith("nf_nat_proto_"):
helpers.setdefault(name, [ ]).append(helper)
continue
# Else we get module alias and if "NAT helper" in "description:" line of modinfo
# then we add it to helpers list and goto next module
if "description" in modinfo and "NAT helper" in modinfo["description"][0]:
helpers.setdefault(name, [ ]).append(helper)
return helpers
def get_nf_conntrack_helper_setting():
try:
return int(readfile("/proc/sys/net/netfilter/nf_conntrack_helper")[0])
except Exception:
log.warning("Failed to get and parse nf_conntrack_helper setting")
return 0
def set_nf_conntrack_helper_setting(flag):
return writefile("/proc/sys/net/netfilter/nf_conntrack_helper",
"1\n" if flag else "0\n")
def check_port(port):
_range = getPortRange(port)
if _range == -2 or _range == -1 or _range is None or \
(len(_range) == 2 and _range[0] >= _range[1]):
if _range == -2:
log.debug2("'%s': port > 65535" % port)
elif _range == -1:
log.debug2("'%s': port is invalid" % port)
elif _range is None:
log.debug2("'%s': port is ambiguous" % port)
elif len(_range) == 2 and _range[0] >= _range[1]:
log.debug2("'%s': range start >= end" % port)
return False
return True
def check_address(ipv, source):
if ipv == "ipv4":
return checkIPnMask(source)
elif ipv == "ipv6":
return checkIP6nMask(source)
else:
return False
def check_single_address(ipv, source):
if ipv == "ipv4":
return checkIP(source)
elif ipv == "ipv6":
return checkIP6(source)
else:
return False
def check_mac(mac):
if len(mac) == 12+5:
# 0 1 : 3 4 : 6 7 : 9 10 : 12 13 : 15 16
for i in (2, 5, 8, 11, 14):
if mac[i] != ":":
return False
for i in (0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16):
if mac[i] not in string.hexdigits:
return False
return True
return False
def uniqify(_list):
# removes duplicates from list, whilst preserving order
output = []
for x in _list:
if x not in output:
output.append(x)
return output
def ppid_of_pid(pid):
""" Get parent for pid """
try:
f = os.popen("ps -o ppid -h -p %d 2>/dev/null" % pid)
pid = int(f.readlines()[0].strip())
f.close()
except Exception:
return None
return pid
def max_zone_name_len():
"""
Netfilter limits length of chain to (currently) 28 chars.
The longest chain we create is FWDI_<zone>_allow,
which leaves 28 - 11 = 17 chars for <zone>.
"""
from firewall.core.base import SHORTCUTS
longest_shortcut = max(map(len, SHORTCUTS.values()))
return 28 - (longest_shortcut + len("__allow"))
def checkUser(user):
if len(user) < 1 or len(user) > os.sysconf('SC_LOGIN_NAME_MAX'):
return False
for c in user:
if c not in string.ascii_letters and \
c not in string.digits and \
c not in [ ".", "-", "_", "$" ]:
return False
return True
def checkUid(uid):
if isinstance(uid, str):
try:
uid = int(uid)
except ValueError:
return False
if uid >= 0 and uid <= 2**31-1:
return True
return False
def checkCommand(command):
if len(command) < 1 or len(command) > 1024:
return False
for ch in [ "|", "\n", "\0" ]:
if ch in command:
return False
if command[0] != "/":
return False
return True
def checkContext(context):
splits = context.split(":")
if len(splits) not in [4, 5]:
return False
# user ends with _u if not root
if splits[0] != "root" and splits[0][-2:] != "_u":
return False
# role ends with _r
if splits[1][-2:] != "_r":
return False
# type ends with _t
if splits[2][-2:] != "_t":
return False
# level might also contain :
if len(splits[3]) < 1:
return False
return True
def joinArgs(args):
if "quote" in dir(shlex):
return " ".join(shlex.quote(a) for a in args)
else:
return " ".join(pipes.quote(a) for a in args)
def splitArgs(_string):
if PY2 and isinstance(_string, unicode): # noqa: F821
# Python2's shlex doesn't like unicode
_string = u2b(_string)
splits = shlex.split(_string)
return map(b2u, splits)
else:
return shlex.split(_string)
def b2u(_string):
""" bytes to unicode """
if isinstance(_string, bytes):
return _string.decode('UTF-8', 'replace')
return _string
def u2b(_string):
""" unicode to bytes """
if not isinstance(_string, bytes):
return _string.encode('UTF-8', 'replace')
return _string
def u2b_if_py2(_string):
""" unicode to bytes only if Python 2"""
if PY2 and isinstance(_string, unicode): # noqa: F821
return _string.encode('UTF-8', 'replace')
return _string
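# Editor's sketch (not part of firewalld): the bytes/unicode helpers above are
# symmetric, so round-tripping well-formed UTF-8 through them is a no-op on
# both Python 2 and Python 3.
def _b2u_u2b_example():
    assert b2u(b'zone') == u'zone'
    assert u2b(u'zone') == b'zone'
    assert b2u(u2b(u'z\u00f6ne')) == u'z\u00f6ne'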
|
hos7ein/firewalld
|
src/firewall/functions.py
|
Python
|
gpl-2.0
| 17,408
| 0.004193
|
import math
def OUT_CIRC(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t = t / d - 1
return c * math.sqrt(1 - t * t) + b
def OUT_QUART(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t = t / d - 1
return -c * (t * t * t * t - 1) + b
def INOUT_CIRC(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t1 = t / (d / 2)
if (t / (d / 2)) < 1:
return -c / 2 * (math.sqrt(1 - (t / (d / 2)) ** 2) - 1) + b
else:
return c / 2 * (math.sqrt(1 - (t1 - 2) ** 2) + 1) + b
def IN_CUBIC(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t /= d
return c * t * t * t + b
def OUT_QUAD(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t /= d
return -c * t * (t - 2) + b
def OUT_BOUNCE(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t /= d
if t < (1 / 2.75):
return c * (7.5625 * t * t) + b
elif t < (2 / 2.75):
t -= (1.5 / 2.75)
return c * (7.5625 * t * t + .75) + b
elif t < (2.5 / 2.75):
t -= (2.25 / 2.75)
return c * (7.5625 * t * t + .9375) + b
else:
t -= (2.625 / 2.75)
return c * (7.5625 * t * t + .984375) + b
def INOUT_EXP(t, b, c, d):
t = float(t)
b = float(b)
c = float(c)
d = float(d)
t1 = t / (d / 2)
if t == 0:
return b
elif t == d:
return b + c
elif t1 < 1:
return c / 2 * math.pow(2, 10 * (t1 - 1)) + b - c * 0.0005
else:
return c / 2 * 1.0005 * (-math.pow(2, -10 * (t1 - 1)) + 2) + b
|
pyfa-org/Pyfa
|
gui/utils/anim_effects.py
|
Python
|
gpl-3.0
| 1,706
| 0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the MSIE WebCache ESE database plugin."""
# pylint: disable=protected-access
def testConvertHeadersValues(self):
"""Tests the _ConvertHeadersValues function."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = (
b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
b'X-XSS-Protection: 1; mode=block\r\n'
b'Alternate-Protocol: 80:quic\r\n\r\n')
expected_headers_value = (
'[HTTP/1.1 200 OK; Content-Type: image/png; '
'X-Content-Type-Options: nosniff; Content-Length: 2759; '
'X-XSS-Protection: 1; mode=block; '
'Alternate-Protocol: 80:quic]')
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value)
def testProcessOnDatabaseWithPartitionsTable(self):
"""Tests the Process function on database with a Partitions table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 1354)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'container_identifier': 1,
'data_type': 'msie:webcache:containers',
'date_time': '2014-05-12 07:30:25.4861987',
'directory': (
'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
'INetCache\\IE\\'),
'name': 'Content',
'set_identifier': 0,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[567], expected_event_values)
def testProcessOnDatabaseWithPartitionsExTable(self):
"""Tests the Process function on database with a PartitionsEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(
['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 4014)
self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'access_count': 5,
'cache_identifier': 0,
'cached_file_size': 726,
'cached_filename': 'b83d57c0[1].svg',
'container_identifier': 14,
'data_type': 'msie:webcache:container',
'date_time': '2019-03-20 17:22:14.0000000',
'entry_identifier': 63,
'sync_count': 0,
'response_headers': (
'[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
'Mon, 16 Dec 2019 20:55:28 GMT]'),
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
kiddinn/plaso
|
tests/parsers/esedb_plugins/msie_webcache.py
|
Python
|
apache-2.0
| 3,923
| 0.001784
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
# TODO(b/72408568): Remove this once session.run can get
# variant tensors.
"""Remove variants from a nest structure, so sess.run will execute."""
def _remove_variant(x):
if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:
return ()
else:
return x
return nest.map_structure(_remove_variant, get_next_op)
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
def tearDown(self):
self._delete_ckpt()
# TODO(b/72657739): Remove sparse_tensor argument, which is to test the
# (deprecated) saveable `SparseTensorSliceDataset`, once the API
# `from_sparse_tensor_slices()`and related tests are deleted.
def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):
"""Runs the core tests.
Args:
ds_fn1: 0-argument function that returns a Dataset.
ds_fn2: 0-argument function that returns a Dataset different from
ds_fn1. If None, verify_restore_in_modified_graph test is not run.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
Raises:
AssertionError if any test fails.
"""
# NOTE: We disable all default optimizations in serialization tests in order
# to test the actual dataset in question.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
def ds_fn1_no_opt():
return ds_fn1().with_options(options)
self.verify_unused_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_fully_used_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_exhausted_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_init_before_restore(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_multiple_breaks(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_reset_restored_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_restore_in_empty_graph(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
if ds_fn2:
def ds_fn2_no_opt():
return ds_fn2().with_options(options)
self.verify_restore_in_modified_graph(
ds_fn1_no_opt,
ds_fn2_no_opt,
num_outputs,
sparse_tensors=sparse_tensors)
def verify_unused_iterator(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn, [0],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_fully_used_iterator(self, ds_fn, num_outputs,
sparse_tensors=False):
"""Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if test fails.
"""
self.verify_run_with_breaks(
ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
"""Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
self.gen_outputs(
ds_fn, [],
num_outputs,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
actual = self.gen_outputs(
ds_fn, [],
0,
ckpt_saved=True,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
self.assertEqual(len(actual), 0)
def verify_init_before_restore(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that restoring into an already initialized iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs),
num_outputs,
init_before_restore=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
num_breaks: The number of break points. These are uniformly spread in
[0, num_outputs] both inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_reset_restored_iterator(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to re-initialize a restored iterator.
This is useful when restoring a training checkpoint during validation.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Collect ground truth containing all outputs.
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Skip some items and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Restore from checkpoint and then run init_op.
with ops.Graph().as_default() as g:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
self._initialize(init_op, sess)
for _ in range(num_outputs):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_modified_graph(self,
ds_fn1,
ds_fn2,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in a modified graph.
Builds an input pipeline using ds_fn1, runs it for `break_point` steps
and saves a checkpoint. Then builds a new graph using ds_fn2, restores
the checkpoint from ds_fn1 and verifies that the restore is successful.
Args:
ds_fn1: See `run_core_tests`.
ds_fn2: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn1
# in `expected`.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn1, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn1 and save checkpoint.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build graph for ds_fn2 but load checkpoint for ds_fn1.
with ops.Graph().as_default() as g:
_, get_next_op, saver = self._build_graph(
ds_fn2, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_empty_graph(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in an empty graph.
Builds an input pipeline using ds_fn, runs it for `break_point` steps
and saves a checkpoint. Then builds a new empty graph, restores
the checkpoint from ds_fn and verifies that the restore is successful.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn
# in `expected`.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build an empty graph but load checkpoint for ds_fn.
with ops.Graph().as_default() as g:
get_next_op, saver = self._build_empty_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_error_on_save(self,
ds_fn,
num_outputs,
error,
break_point=None,
sparse_tensors=False):
"""Attempts to save a non-saveable iterator.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
error: Declared error when trying to save iterator.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
for _ in range(break_point):
sess.run(get_next_op)
with self.assertRaises(error):
self._save(sess, saver)
def verify_run_with_breaks(self,
ds_fn,
break_points,
num_outputs,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that ds_fn() produces the same outputs with and without breaks.
1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
*without* stopping at break points.
2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
with stopping at break points.
Deep matches outputs from 1 and 2.
Args:
ds_fn: See `gen_outputs`.
break_points: See `gen_outputs`.
num_outputs: See `gen_outputs`.
init_before_restore: See `gen_outputs`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
actual = self.gen_outputs(
ds_fn,
break_points,
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
self.match(expected, actual)
def gen_outputs(self,
ds_fn,
break_points,
num_outputs,
ckpt_saved=False,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True,
save_checkpoint_at_end=True):
"""Generates elements from input dataset while stopping at break points.
Produces `num_outputs` outputs and saves the state of the iterator in the
Saver checkpoint.
Args:
ds_fn: 0-argument function that returns the dataset.
break_points: A list of integers. For each `break_point` in
`break_points`, we produce outputs till `break_point` number of items
have been produced and then checkpoint the state. The current graph
and session are destroyed and a new graph and session are used to
produce outputs till next checkpoint or till `num_outputs` elements
have been produced. `break_point` must be <= `num_outputs`.
num_outputs: The total number of outputs to produce from the iterator.
ckpt_saved: Whether a checkpoint already exists. If False, we build the
graph from ds_fn.
init_before_restore: Whether init should be called before saver.restore.
This is just so that we can verify that restoring an already initialized
iterator works.
sparse_tensors: Whether dataset is built from SparseTensor(s).
verify_exhausted: Whether to verify that the iterator has been exhausted
after producing `num_outputs` elements.
save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved at each break point but not at the
end. Note that checkpoints overwrite each other so there is always only
a single checkpoint available. Defaults to True.
Returns:
A list of `num_outputs` items.
"""
outputs = []
def get_ops():
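      # Reload the ops from the saved meta graph if a checkpoint already exists;
      # otherwise build a fresh graph from ds_fn.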
if ckpt_saved:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
else:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
return init_op, get_next_op, saver
for i in range(len(break_points) + 1):
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = get_ops()
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
if ckpt_saved:
if init_before_restore:
self._initialize(init_op, sess)
self._restore(saver, sess)
else:
self._initialize(init_op, sess)
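          # This segment runs from the previous break point (or 0) up to the
          # next break point (or num_outputs), then saves a checkpoint.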
start = break_points[i - 1] if i > 0 else 0
end = break_points[i] if i < len(break_points) else num_outputs
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
if i == len(break_points) and verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
if save_checkpoint_at_end or i < len(break_points):
self._save(sess, saver)
ckpt_saved = True
return outputs
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
else:
self.assertEqual(expected, actual)
def does_not_match(self, expected, actual):
with self.assertRaises(AssertionError):
self.match(expected, actual)
def gen_break_points(self, num_outputs, num_samples=10):
"""Generates `num_samples` breaks points in [0, num_outputs]."""
return np.linspace(0, num_outputs, num_samples, dtype=int)
def _build_graph(self, ds_fn, sparse_tensors=False):
iterator = dataset_ops.make_initializable_iterator(ds_fn())
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
init_op = iterator.initializer
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
sparse_tensors)
saver = saver_lib.Saver(allow_empty=True)
return init_op, get_next, saver
def _build_empty_graph(self, ds_fn, sparse_tensors=False):
iterator = iterator_ops.Iterator.from_structure(
self._get_output_types(ds_fn),
output_shapes=self._get_output_shapes(ds_fn),
output_classes=self._get_output_classes(ds_fn))
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
saver = saver_lib.Saver(allow_empty=True)
return get_next, saver
def _add_iterator_ops_to_collection(self,
init_op,
get_next,
ds_fn,
sparse_tensors=False):
ops.add_to_collection("iterator_ops", init_op)
# `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
# do not support tuples we flatten the tensors and restore the shape in
# `_get_iterator_ops_from_collection`.
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
ops.add_to_collection("iterator_ops", get_next.indices)
ops.add_to_collection("iterator_ops", get_next.values)
ops.add_to_collection("iterator_ops", get_next.dense_shape)
return
get_next_list = nest.flatten(get_next)
for i, output_class in enumerate(
nest.flatten(self._get_output_classes(ds_fn))):
if output_class is sparse_tensor.SparseTensor:
ops.add_to_collection("iterator_ops", get_next_list[i].indices)
ops.add_to_collection("iterator_ops", get_next_list[i].values)
ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
else:
ops.add_to_collection("iterator_ops", get_next_list[i])
def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
all_ops = ops.get_collection("iterator_ops")
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
init_op, indices, values, dense_shape = all_ops
return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
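    # all_ops[0] is the init op; the remaining collection entries are the flattened
    # get_next tensors, with each SparseTensor stored as an (indices, values,
    # dense_shape) triple (see _add_iterator_ops_to_collection).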
get_next_list = []
i = 1
for output_class in nest.flatten(self._get_output_classes(ds_fn)):
if output_class is sparse_tensor.SparseTensor:
indices, values, dense_shape = all_ops[i:i + 3]
i += 3
get_next_list.append(
sparse_tensor.SparseTensor(indices, values, dense_shape))
else:
get_next_list.append(all_ops[i])
i += 1
return all_ops[0], nest.pack_sequence_as(
self._get_output_types(ds_fn), get_next_list)
def _get_output_types(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_types(ds_fn())
def _get_output_shapes(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_shapes(ds_fn())
def _get_output_classes(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_classes(ds_fn())
def _ckpt_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _latest_ckpt(self):
return checkpoint_management.latest_checkpoint(self.get_temp_dir())
def _save(self, sess, saver):
saver.save(sess, self._ckpt_path())
def _restore(self, saver, sess):
sess.run(lookup_ops.tables_initializer())
saver.restore(sess, self._latest_ckpt())
def _initialize(self, init_op, sess):
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
sess.run(init_op)
def _import_meta_graph(self):
meta_file_path = self._ckpt_path() + ".meta"
return saver_lib.import_meta_graph(meta_file_path)
def _delete_ckpt(self):
# Remove all checkpoint files.
prefix = self._ckpt_path()
pattern = prefix + "*"
files = gfile.Glob(pattern)
    # map() is lazy on Python 3, so iterate explicitly to actually remove the files.
    for filename in files:
      gfile.Remove(filename)
|
jbedorf/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py
|
Python
|
apache-2.0
| 26,060
| 0.004298
|
from flappy.display3d.vertexbuffer3d import VertexBuffer3D, VertexBuffer3DFormat
from flappy.display3d.indexbuffer3d import IndexBuffer3D
from flappy.display3d.program3d import Program3D
from flappy.display3d.texture import Texture
from flappy.display3d.scene3d import Scene3D
|
wannaphongcom/flappy
|
flappy/display3d/__init__.py
|
Python
|
mit
| 279
| 0.003584
|
import asyncio
import logging
import concurrent.futures
class EchoServer(object):
"""Echo server class"""
def __init__(self, host, port, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._server = asyncio.start_server(self.handle_connection, host=host, port=port)
def start(self, and_loop=True):
self._server = self._loop.run_until_complete(self._server)
logging.info('Listening established on {0}'.format(self._server.sockets[0].getsockname()))
if and_loop:
self._loop.run_forever()
def stop(self, and_loop=True):
self._server.close()
if and_loop:
self._loop.close()
@asyncio.coroutine
def handle_connection(self, reader, writer):
peername = writer.get_extra_info('peername')
logging.info('Accepted connection from {}'.format(peername))
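        # Echo received lines back until EOF, dropping the connection if the
        # client stays idle for more than 10 seconds.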
while not reader.at_eof():
try:
data = yield from asyncio.wait_for(reader.readline(), timeout=10.0)
writer.write(data)
except concurrent.futures.TimeoutError:
break
writer.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
server = EchoServer('127.0.0.1', 8899)
try:
server.start()
except KeyboardInterrupt:
pass # Press Ctrl+C to stop
finally:
server.stop()
|
kaefik/zadanie-python
|
echoserver.py
|
Python
|
mit
| 1,395
| 0.002867
|
import numpy as np
import numpy.testing as npt
import AFQ.utils.parallel as para
def power_it(num, n=2):
# We define a function of the right form for parallelization
return num ** n
def test_parfor():
my_array = np.arange(100).reshape(10, 10)
i, j = np.random.randint(0, 9, 2)
my_list = list(my_array.ravel())
for engine in ["joblib", "dask", "serial"]:
for backend in ["threading", "multiprocessing"]:
npt.assert_equal(para.parfor(power_it,
my_list,
engine=engine,
backend=backend,
out_shape=my_array.shape)[i, j],
power_it(my_array[i, j]))
# If it's not reshaped, the first item should be the item 0, 0:
npt.assert_equal(para.parfor(power_it,
my_list,
engine=engine,
backend=backend)[0],
power_it(my_array[0, 0]))
|
arokem/pyAFQ
|
AFQ/utils/tests/test_parallel.py
|
Python
|
bsd-2-clause
| 1,135
| 0
|
#!/usr/bin/env python3.4
# dotslash for local
from flask import Flask, render_template, request, redirect
from werkzeug.contrib.fixers import ProxyFix
from urllib.request import urlopen, Request
from urllib.parse import urlparse
from omxplayer import OMXPlayer
from youtube_dl import YoutubeDL
from youtube_dl.utils import DownloadError
from livestreamer import Livestreamer, PluginError
import os
import traceback
import re
import json
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
player = None
title = None
last_logged_message = ""
# this regex is to escape terminal color codes.
_ANSI_ESCAPE_REXP = re.compile(r"\x1b[^m]*m")
@app.route('/about/')
def splash():
return render_template('splash.html')
@app.route('/')
def root(): # redirect to remote for now, might change.
return redirect('/remote')
@app.route('/remote/')
def remote():
return render_template('remote.html')
@app.route('/settings/')
def settings():
return render_template('settings.html')
@app.route('/remote/omxplayer/<command>') # sending keys from the remote
def omxplayer_remote(command):
player = get_player()
if player is not None:
getattr(player, command)()
return '', 204
else:
return 'nothing playing', 400
@app.route('/remote/system/<command>')
def system_remote(command):
if command == "reboot":
log('rebooting!')
os.system("sudo reboot")
else:
return 'bad command', 400
return '', 204 # success!
@app.route('/status/')
def status():
player = get_player()
if player is not None:
dictionary = {
'video_loaded': True,
'paused': player.paused,
'now_playing': title
}
else:
dictionary = {'video_loaded': False}
return json.dumps(dictionary)
@app.route('/play', methods=['GET'])
def play_url(): # this only plays http urls for now, torrents soon.
global title
url = request.args.get('url') # grab url from /play?url=*
if not url.startswith('http'): # in case the user forgot it
log('url missing http/wrong protocol')
url = 'http://' + url # let's assume it's http, not https
log('received url %s' % url)
log('requesting headers from %s...' % url)
req = Request(url)
req.get_method = lambda: 'HEAD' # only request headers, no content
response = urlopen(req)
ctype = response.headers['content-type']
ctype_split = ctype.split('/') # split into 2 parts
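    # e.g. "video/mp4" -> ["video", "mp4"]; the main type decides how the URL is handled below.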
log('headers received. content type is %s' % ctype)
try:
if ctype_split[0] == 'audio' or ctype_split[0] == 'video':
log('url was raw media file, playing! :)')
title = url # i guess this works? :T
play_omxplayer(url)
elif ctype_split[1] == 'x-bittorrent':
log('loading torrents not implemented.')
# this isn't implemented yet.
elif ctype_split[0] == 'text':
# here we check if it's a livestream, and if so get the RTMP url
log('checking if url is a livestream...')
live = Livestreamer()
try:
if "youtube" in url:
raise RuntimeError("youtube is fucked up w/ streaming, falling back to youtube-dl")
plugin = live.resolve_url(url)
streams = plugin.get_streams()
stream = streams.get("best") # fingers crossed for best quality
stream_url_types = ['rtmp', 'url'] # things that livestreamer can have :D
for stream_type in stream_url_types:
if hasattr(stream, stream_type):
log('url is livestream!')
title = "%s (livestream)" % url
play_omxplayer(getattr(stream, stream_type))
return '', 204
except (PluginError, RuntimeError) as e: # therefore url is not (supported) livestream
pass # continue and let youtube-dl try.
log('loading youtube-dl for further processing')
ydl = YoutubeDL({'outtmpl': '%(id)s%(ext)s', 'restrictfilenames': True})
ydl.add_default_info_extractors()
result = ydl.extract_info(url, download=False)
if 'entries' in result: # if video is a playlist
video = result['entries'][0] # play the 1st video in the playlist
else:
video = result
play_omxplayer(video['url'])
title = video['title']
else:
raise DownloadError('Invalid filetype: not audio, video, or text.')
return '', 204 # success w/ no response!
except (UnicodeDecodeError, DownloadError) as e:
return _ANSI_ESCAPE_REXP.sub('', str(e)), 400 # send error message
@app.route("/log/")
def gen_log():
return get_last_logged_message()
def play_omxplayer(uri):
log('playing %s in omxplayer...' % uri)
global player
if get_player() is not None:
player.stop()
player = OMXPlayer(uri,
args='-b -r --audio_queue=10 --video_queue=40',
start_playback=True)
def log(text):
print("[sparky] %s" % text)
global last_logged_message
last_logged_message = text
def get_last_logged_message():
global last_logged_message
return last_logged_message
def get_player():
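    # `player` and `title` are module-level globals; clear both once playback has finished.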
    global player, title
if player is not None and player.has_finished():
player = None
title = None
return player
if __name__ == '__main__':
app.run("0.0.0.0", debug=True)
|
aerobit/sparky
|
sparky.py
|
Python
|
unlicense
| 5,633
| 0.00213
|
from axiom.test.historic.stubloader import StubbedTest
from xquotient.mail import MailTransferAgent
from axiom.userbase import LoginSystem
class MTAUpgraderTest(StubbedTest):
def testMTA2to3(self):
"""
Make sure MailTransferAgent upgraded OK and that its
"userbase" attribute refers to the store's userbase.
"""
mta = self.store.findUnique(MailTransferAgent)
self.assertIdentical(mta.userbase,
self.store.findUnique(LoginSystem))
|
twisted/quotient
|
xquotient/test/historic/test_mta2to3.py
|
Python
|
mit
| 512
| 0.001953
|
# -*- coding: utf-8 -*-
#
# Gateway documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 25 06:46:30 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('.'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gateway'
copyright = u'2012, Stephane Wirtel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import pkg_resources
try:
release = pkg_resources.get_distribution('gateway').version
except pkg_resources.DistributionNotFound:
    print 'To build the documentation, the distribution information of Gateway'
    print 'has to be available. Either install the package into your'
    print 'development environment or run "setup.py develop" to set up the'
print 'metadata. A virtualenv is recommended!'
sys.exit(1)
del pkg_resources
if 'dev' in release:
release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gatewaydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'fontpkg' : r'\usepackage{mathpazo}',
'papersize' : 'a4paper',
'pointsize' : '12pt',
'preamble' : r' \usepackage{flaskstyle}',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Gateway.tex', u'Gateway Documentation',
u'Stephane Wirtel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
latex_use_modindex = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
latex_additional_files = [
'flaskstyle.sty',
]
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gateway', u'Gateway Documentation',
[u'Stephane Wirtel'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Gateway', u'Gateway Documentation',
u'Stephane Wirtel', 'Gateway', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#locale_dirs = ['translated/']
#language = 'fr'
|
matrixise/gateway
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,914
| 0.00718
|
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../")
import BellZhurkov.Python.TestExamples.TestUtil.Bell_Test_Data as Data
import BellZhurkov.Python.Code.BellZhurkov as BellModel
def RunWoodsideFigure6():
"""
Reproduces Figure 6 From:
Woodside, Michael T., and Steven M. Block.
"Reconstructing Folding Energy Landscapes by Single-Molecule Force Spectroscopy"
Annual Review of Biophysics 43, no. 1 (2014): 19-39.
doi:10.1146/annurev-biophys-051013-022754.
See TestExamples.TestUtil.Bell_Test_Data.Woodside2014FoldingAndUnfoldingData
"""
BellData = Data.Woodside2014FoldingAndUnfoldingData()
Forces,Folding,Unfolding = (BellData.Forces,BellData.RatesFold,
BellData.RatesUnfold)
# everything in SI initially
vary = dict(beta=False,
k0=False,
DeltaG=True,
DeltaX=True)
GuessDict = dict(beta=1/(4.1e-21),
k0=1,
DeltaX=20e-9,
DeltaG=0)
opt = dict(Values=GuessDict,
Vary=vary)
infFold = BellModel.BellZurkovFit(Forces,Folding,**opt)
infUnfold = BellModel.BellZurkovFit(Forces,Unfolding,**opt)
# get predictions along a (slightly larger) x range
xMin=11e-12
xMax=15e-12
# how much should we interpolate?
numPredict = (len(Forces)+1)*50
xRangePredict = np.linspace(xMin,xMax,numPredict)
predictFold = infFold.Predict(xRangePredict)
predictUnfold = infUnfold.Predict(xRangePredict)
markerDict = dict(marker='o',
markersize=7,
linewidth=0,
markeredgewidth=0.0)
lineDict = dict(linestyle='-',color='k',linewidth=1.5)
toPn = 1e12
ForcePn = Forces*toPn
fig = plt.figure()
ax = plt.subplot(1,1,1)
plt.plot(ForcePn,Folding,'ro',label="Folding",**markerDict)
plt.plot(xRangePredict*toPn,predictFold,**lineDict)
plt.plot(ForcePn,Unfolding,'bo',label="Unfolding",**markerDict)
plt.plot(xRangePredict*toPn,predictUnfold,**lineDict)
ax.set_yscale('log')
# limits in PicoNewtons
plt.xlim(xMin*toPn,xMax*toPn)
plt.xlabel("Force (pN)")
plt.ylabel("Rate (Hz)")
plt.title("Woodside and Block, Figure 6a (2016)")
plt.legend(loc='lower center')
fig.savefig("./Woodside2016_Figure6.png")
def RunSchlierf2006Figure1a():
DataToTest = Data.Schlierf2006Figure1a()
Forces,Folding = (DataToTest.Forces,DataToTest.RatesFold)
# everything in SI initially
vary = dict(beta=False,
k0=True,
DeltaG=False,
DeltaX=True)
GuessDict = dict(beta=1/(4.1e-21),
k0=0.35,
DeltaX=5e-10,
DeltaG=0)
opt = dict(Values=GuessDict,
Vary=vary)
infFold = BellModel.BellZurkovFit(Forces,Folding,**opt)
def run():
"""
Runs examples of the Bell-Zhurkov Model
"""
RunSchlierf2006Figure1a()
RunWoodsideFigure6()
if __name__ == "__main__":
run()
|
prheenan/BioModel
|
BellZhurkov/Python/TestExamples/Examples/Bell_Examples.py
|
Python
|
gpl-2.0
| 3,266
| 0.013778
|
"""
Models to support Course Surveys feature
"""
import logging
from lxml import etree
from collections import OrderedDict
from django.db import models
from student.models import User
from django.core.exceptions import ValidationError
from model_utils.models import TimeStampedModel
from survey.exceptions import SurveyFormNameAlreadyExists, SurveyFormNotFound
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.survey")
class SurveyForm(TimeStampedModel):
"""
Model to define a Survey Form that contains the HTML form data
that is presented to the end user. A SurveyForm is not tied to
a particular run of a course, to allow for sharing of Surveys
across courses
"""
name = models.CharField(max_length=255, db_index=True, unique=True)
form = models.TextField()
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
"""
Override save method so we can validate that the form HTML is
actually parseable
"""
self.validate_form_html(self.form)
# now call the actual save method
super(SurveyForm, self).save(*args, **kwargs)
@classmethod
def validate_form_html(cls, html):
"""
Makes sure that the html that is contained in the form field is valid
"""
try:
fields = cls.get_field_names_from_html(html)
except Exception as ex:
log.exception("Cannot parse SurveyForm html: {}".format(ex))
raise ValidationError("Cannot parse SurveyForm as HTML: {}".format(ex))
if not len(fields):
raise ValidationError("SurveyForms must contain at least one form input field")
@classmethod
def create(cls, name, form, update_if_exists=False):
"""
Helper class method to create a new Survey Form.
update_if_exists=True means that if a form already exists with that name, then update it.
Otherwise throw an SurveyFormAlreadyExists exception
"""
survey = cls.get(name, throw_if_not_found=False)
if not survey:
survey = SurveyForm(name=name, form=form)
else:
if update_if_exists:
survey.form = form
else:
raise SurveyFormNameAlreadyExists()
survey.save()
return survey
@classmethod
def get(cls, name, throw_if_not_found=True):
"""
        Helper class method to look up a Survey Form, throw SurveyFormNotFound if it does not exist
in the database, unless throw_if_not_found=False then we return None
"""
survey = None
exists = SurveyForm.objects.filter(name=name).exists()
if exists:
survey = SurveyForm.objects.get(name=name)
elif throw_if_not_found:
raise SurveyFormNotFound()
return survey
def get_answers(self, user=None, limit_num_users=10000):
"""
Returns all answers for all users for this Survey
"""
return SurveyAnswer.get_answers(self, user, limit_num_users=limit_num_users)
def has_user_answered_survey(self, user):
"""
Returns whether a given user has supplied answers to this
survey
"""
return SurveyAnswer.do_survey_answers_exist(self, user)
def save_user_answers(self, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
# first remove any answer the user might have done before
self.clear_user_answers(user)
SurveyAnswer.save_answers(self, user, answers, course_key)
def clear_user_answers(self, user):
"""
Removes all answers that a user has submitted
"""
SurveyAnswer.objects.filter(form=self, user=user).delete()
def get_field_names(self):
"""
Returns a list of defined field names for all answers in a survey. This can be
helpful for reporting like features, i.e. adding headers to the reports
This is taken from the set of <input> fields inside the form.
"""
return SurveyForm.get_field_names_from_html(self.form)
@classmethod
def get_field_names_from_html(cls, html):
"""
Returns a list of defined field names from a block of HTML
"""
names = []
        # make sure the form is wrapped in some outer single element
# otherwise lxml can't parse it
# NOTE: This wrapping doesn't change the ability to query it
tree = etree.fromstring(u'<div>{}</div>'.format(html))
input_fields = (
tree.findall('.//input') + tree.findall('.//select') +
tree.findall('.//textarea')
)
for input_field in input_fields:
if 'name' in input_field.keys() and input_field.attrib['name'] not in names:
names.append(input_field.attrib['name'])
return names
class SurveyAnswer(TimeStampedModel):
"""
Model for the answers that a user gives for a particular form in a course
"""
user = models.ForeignKey(User, db_index=True)
form = models.ForeignKey(SurveyForm, db_index=True)
field_name = models.CharField(max_length=255, db_index=True)
field_value = models.CharField(max_length=1024)
# adding the course_id where the end-user answered the survey question
# since it didn't exist in the beginning, it is nullable
course_key = CourseKeyField(max_length=255, db_index=True, null=True)
@classmethod
def do_survey_answers_exist(cls, form, user):
"""
Returns whether a user has any answers for a given SurveyForm for a course
This can be used to determine if a user has taken a CourseSurvey.
"""
return SurveyAnswer.objects.filter(form=form, user=user).exists()
@classmethod
def get_answers(cls, form, user=None, limit_num_users=10000):
"""
Returns all answers a user (or all users, when user=None) has given to an instance of a SurveyForm
Return is a nested dict which are simple name/value pairs with an outer key which is the
user id. For example (where 'field3' is an optional field):
results = {
'1': {
'field1': 'value1',
'field2': 'value2',
},
'2': {
'field1': 'value3',
'field2': 'value4',
'field3': 'value5',
}
:
:
}
        limit_num_users is to prevent an unintentionally huge in-memory dictionary.
"""
if user:
answers = SurveyAnswer.objects.filter(form=form, user=user)
else:
answers = SurveyAnswer.objects.filter(form=form)
results = OrderedDict()
num_users = 0
for answer in answers:
user_id = answer.user.id
if user_id not in results and num_users < limit_num_users:
results[user_id] = OrderedDict()
num_users = num_users + 1
if user_id in results:
results[user_id][answer.field_name] = answer.field_value
return results
@classmethod
def save_answers(cls, form, user, answers, course_key):
"""
Store answers to the form for a given user. Answers is a dict of simple
name/value pairs
        IMPORTANT: There is no validation of form answers at this point. All data
supplied to this method is presumed to be previously validated
"""
for name in answers.keys():
value = answers[name]
# See if there is an answer stored for this user, form, field_name pair or not
# this will allow for update cases. This does include an additional lookup,
# but write operations will be relatively infrequent
defaults = {"field_value": value}
if course_key:
defaults['course_key'] = course_key
answer, created = SurveyAnswer.objects.get_or_create(
user=user,
form=form,
field_name=name,
defaults=defaults
)
if not created:
# Allow for update cases.
answer.field_value = value
answer.course_key = course_key
answer.save()
|
ahmadiga/min_edx
|
lms/djangoapps/survey/models.py
|
Python
|
agpl-3.0
| 8,631
| 0.001738
|
import sys
try:
import setuptools
from setuptools import setup
except ImportError:
setuptools = None
from distutils.core import setup
version = '0.0.1'
kwargs = {}
if setuptools is not None:
kwargs['install_requires'] = ['tornado>=4.3']
if sys.version_info < (3, 4):
kwargs['install_requires'].append('enum34')
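        # enum34 backports the standard-library enum module to Python < 3.4.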
setup(
name='tornado_http2',
version=version,
packages=['tornado_http2', 'tornado_http2.test'],
package_data={
'tornado_http2': [
'hpack_static_table.txt',
'hpack_huffman_data.txt',
],
'tornado_http2.test': [
'test.crt',
'test.key',
],
},
**kwargs)
|
bdarnell/tornado_http2
|
setup.py
|
Python
|
apache-2.0
| 702
| 0
|
import io
import os
import unittest
from stango import Stango
from stango.files import Files
from . import StangoTestCase, make_suite, view_value, view_template
dummy_view = view_value('')
class GenerateTestCase(StangoTestCase):
def setup(self):
self.tmp = self.tempdir()
self.manager = Stango()
self.manager.index_file = 'index.html'
def test_generate_simple(self):
self.manager.files += [
('', view_value('foobar')),
('barfile.txt', view_value('barfoo')),
]
self.manager.generate(self.tmp)
self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'index.html'])
with open(os.path.join(self.tmp, 'index.html')) as fobj:
self.eq(fobj.read(), 'foobar')
with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
self.eq(fobj.read(), 'barfoo')
def test_generate_dest_is_non_dir(self):
self.manager.files = Files(
('', dummy_view),
)
dest_path = os.path.join(self.tmp, 'dest.txt')
with open(dest_path, 'w') as fobj:
fobj.write('foo')
exc = self.assert_raises(ValueError, self.manager.generate, dest_path)
self.eq(str(exc), "'%s' is not a directory" % dest_path)
# Check the file wasn't modified
self.eq(os.listdir(self.tmp), ['dest.txt'])
with open(os.path.join(self.tmp, 'dest.txt'), 'r') as fobj:
self.eq(fobj.read(), 'foo')
def test_generate_outdir_exists(self):
# Create a file and a directory to outdir
with open(os.path.join(self.tmp, 'foo'), 'w') as fobj:
fobj.write('bar')
os.mkdir(os.path.join(self.tmp, 'dummydir'))
self.eq(sorted(os.listdir(self.tmp)), ['dummydir', 'foo'])
self.manager.files = Files(
('', view_value('baz')),
)
self.manager.generate(self.tmp)
# Check that the old destdir contents were removed
self.eq(os.listdir(self.tmp), ['index.html'])
def test_generate_different_index_file(self):
self.manager.index_file = 'foofile.txt'
self.manager.files += [
('', view_value('foobar')),
('barfile.txt', view_value('barfoo')),
]
self.manager.generate(self.tmp)
self.eq(sorted(os.listdir(self.tmp)), ['barfile.txt', 'foofile.txt'])
with open(os.path.join(self.tmp, 'foofile.txt')) as fobj:
self.eq(fobj.read(), 'foobar')
with open(os.path.join(self.tmp, 'barfile.txt')) as fobj:
self.eq(fobj.read(), 'barfoo')
def test_view_returns_a_bytes_object(self):
self.manager.files = Files(
('', view_value(b'\xde\xad\xbe\xef')),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
self.eq(fobj.read(), b'\xde\xad\xbe\xef')
def test_view_returns_a_bytearray_object(self):
self.manager.files = Files(
('', view_value(bytearray(b'\xba\xdc\x0f\xfe'))),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
self.eq(fobj.read(), b'\xba\xdc\x0f\xfe')
def test_view_returns_a_filelike_object_with_str_contents(self):
self.manager.files = Files(
('', view_value(io.StringIO('foobar'))),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
self.eq(fobj.read(), 'foobar')
def test_view_returns_a_filelike_object_with_bytes_contents(self):
self.manager.files = Files(
('', view_value(io.BytesIO(b'barfoo'))),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'r') as fobj:
self.eq(fobj.read(), 'barfoo')
def test_view_renders_a_template(self):
self.manager.template_dirs.insert(0, self.template_path)
self.manager.files = Files(
('', view_template('value.txt'), {'value': 'foobar'})
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html')) as fobj:
self.eq(fobj.read(), 'value is: foobar')
def test_no_index_file(self):
self.manager.index_file = None
self.manager.files = Files(
('quux/', dummy_view),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "Directory path and no index_file: 'quux/'")
def test_view_returns_None(self):
self.manager.files = Files(
('', view_value(None)),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "The result of view 'value_returner' for path '' is not a str, bytes or bytearray instance or a file-like object")
def test_view_returns_an_integer(self):
self.manager.files = Files(
('foo.txt', view_value(1)),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "The result of view 'value_returner' for path 'foo.txt' is not a str, bytes or bytearray instance or a file-like object")
def test_view_returns_a_filelike_object_with_invalid_contents(self):
class InvalidFile(object):
def read(self):
return 42
self.manager.files = Files(
('', view_value(InvalidFile())),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), "Contents of the file-like object, returned by view 'value_returner' for path '', is not a str, bytes or bytearray instance")
def test_post_render_hook(self):
def post_render_hook(context, data):
return data + b' hurr durr'
self.manager.add_hook('post_render_hook', post_render_hook)
self.manager.files = Files(
('', view_value('foobar')),
)
self.manager.generate(self.tmp)
self.eq(os.listdir(self.tmp), ['index.html'])
with open(os.path.join(self.tmp, 'index.html'), 'rb') as fobj:
self.eq(fobj.read(), b'foobar hurr durr')
def test_post_render_hook_returns_None(self):
self.manager.add_hook('post_render_hook', lambda x, y: None)
self.manager.files = Files(
('', view_value('foobar')),
)
exc = self.assert_raises(ValueError, self.manager.generate, self.tmp)
self.eq(str(exc), 'The result of post_render_hook is not a bytes or bytearray instance for index.html')
def suite():
return make_suite(GenerateTestCase)
|
akheron/stango
|
tests/test_generate.py
|
Python
|
mit
| 6,958
| 0.000862
|
"""
This module contains transformation functions (clip -> clip).
One file per fx; the file's name is the fx's name.
"""
|
kerimlcr/ab2017-dpyo
|
ornek/moviepy/moviepy-0.2.2.12/moviepy/video/fx/__init__.py
|
Python
|
gpl-3.0
| 121
| 0
|
#! /usr/bin/env python
import os
import sys
import glob
version = (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
def substitute_file(name):
subst = ''
f = open(name)
for l in f:
if '#define LIBTORRENT_VERSION_MAJOR' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_MAJOR %d\n' % version[0]
elif '#define LIBTORRENT_VERSION_MINOR' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_MINOR %d\n' % version[1]
elif '#define LIBTORRENT_VERSION_TINY' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_TINY %d\n' % version[2]
elif '#define LIBTORRENT_VERSION ' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION "%d.%d.%d.%d"\n' % (version[0], version[1], version[2], version[3])
elif 'AC_INIT([libtorrent-rasterbar]' in l and name.endswith('.ac'):
l = 'AC_INIT([libtorrent-rasterbar],[%d.%d.%d],[arvid@libtorrent.org],\n' % (version[0], version[1], version[2])
elif 'set (VERSION ' in l and name.endswith('.txt'):
l = 'set (VERSION "%d.%d.%d")\n' % (version[0], version[1], version[2])
elif ':Version: ' in l and (name.endswith('.rst') or name.endswith('.py')):
l = ':Version: %d.%d.%d\n' % (version[0], version[1], version[2])
elif 'VERSION = ' in l and name.endswith('Jamfile'):
l = 'VERSION = %d.%d.%d ;\n' % (version[0], version[1], version[2])
elif 'version=' in l and name.endswith('setup.py'):
l = "\tversion = '%d.%d.%d',\n" % (version[0], version[1], version[2])
elif "version = '" in l and name.endswith('setup.py'):
l = "\tversion = '%d.%d.%d',\n" % (version[0], version[1], version[2])
subst += l
f.close()
open(name, 'w+').write(subst)
substitute_file('include/libtorrent/version.hpp')
substitute_file('CMakeLists.txt')
substitute_file('configure.ac')
substitute_file('bindings/python/setup.py')
substitute_file('docs/gen_reference_doc.py')
for i in glob.glob('docs/*.rst'):
substitute_file(i)
substitute_file('Jamfile')
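# Example invocation (a sketch; the four arguments are major, minor, tiny and a
# fourth build number as read from sys.argv above, and the script is assumed to
# be run from the libtorrent source root so the relative paths resolve):
#
#   python set_version.py 1 0 9 0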
|
steeve/libtorrent
|
set_version.py
|
Python
|
bsd-3-clause
| 1,975
| 0.021772
|
print "Loading USBDriver : Logitech Cordless RumblePad 2"
class USBDriver :
def __init__(self):
self.componentNextThrottleFrame = "Hat Switch" # Component for throttle frames browsing
self.valueNextThrottleFrame = 0.5
self.componentPreviousThrottleFrame = "Hat Switch"
self.valuePreviousThrottleFrame = 1
self.componentNextRunningThrottleFrame = "" # Component for running throttle frames browsing
self.valueNextRunningThrottleFrame = 0.75
self.componentPreviousRunningThrottleFrame = ""
self.valuePreviousRunningThrottleFrame = 0.25
# From here on, available only when no throttle is active in the current window
self.componentNextRosterBrowse = "Hat Switch" # Component for roster browsing
self.valueNextRoster = 0.75
self.componentPreviousRosterBrowse = "Hat Switch"
self.valuePreviousRoster = 0.25
self.componentRosterSelect = "Button 4" # Component to select a roster
self.valueRosterSelect = 1
# From here on, available only when a throttle is active in the current window
self.componentThrottleRelease = "Button 5" # Component to release current throttle
self.valueThrottleRelease = 1
self.componentSpeed = "X Axis" # Analog axis component for curent throttle speed
self.valueSpeedTrigger = 0.05 # ignore values lower than
self.componentSpeedMultiplier = .5 # multiplier for pad value (negative values to reveerse)
self.componentSpeedIncrease = ""
self.valueSpeedIncrease = 1
self.componentSpeedDecrease = ""
self.valueSpeedDecrease = 1
self.componentDirectionForward = "Z Rotation" # Analog axis component for current throttle direction
self.valueDirectionForward = -1
self.componentDirectionBackward = "Z Rotation"
self.valueDirectionBackward = 1
self.componentStopSpeed = "Button 7" # Preset speed button stop, double tap will Estop
self.valueStopSpeed = 1
self.componentSlowSpeed = "" # Preset speed button slow
self.valueSlowSpeed = 1
self.componentCruiseSpeed = "" # Preset speed button cruise, double tap will max speed
self.valueCruiseSpeed = 1
self.componentMaxSpeed = "" # Preset speed button max
self.valueMaxSpeed = 1
self.componentF0 = "Button 0" # Function button
self.valueF0 = 1
self.valueF0Off = 0 # off event for non lockable functions
self.componentF1 = "Button 1" # Function button
self.valueF1 = 1
self.valueF1Off = 0
self.componentF2 = "Button 2" # Function button
self.valueF2 = 1
self.valueF2Off = 0
self.componentF3 = "Button 3" # Function button
self.valueF3 = 1
self.valueF3Off = 0
self.componentF4 = "" # Function button
self.valueF4 = 1
self.valueF4Off = 0
self.componentF5 = "" # Function button
self.valueF5 = 1
self.valueF5Off = 0
self.componentF6 = "" # Function button
self.valueF6 = 1
self.valueF6Off = 0
self.componentF7 = "" # Function button
self.valueF7 = 1
self.valueF7Off = 0
self.componentF8 = "" # Function button
self.valueF8 = 1
self.valueF8Off = 0
self.componentF9 = "" # Function button
self.valueF9 = 1
self.valueF9Off = 0
self.componentF10 = "" # Function button
self.valueF10 = 1
self.valueF10Off = 0
self.componentF11 = "" # Function button
self.valueF11 = 1
self.valueF11Off = 0
self.componentF12 = "" # Function button
self.valueF12 = 1
self.valueF12Off = 0
self.componentF13 = "" # Function button
self.valueF13 = 1
self.valueF13Off = 0
self.componentF14 = "" # Function button
self.valueF14 = 1
self.valueF14Off = 0
self.componentF15 = "" # Function button
self.valueF15 = 1
self.valueF15Off = 0
self.componentF16 = "" # Function button
self.valueF16 = 1
self.valueF16Off = 0
self.componentF17 = "" # Function button
self.valueF17 = 1
self.valueF17Off = 0
self.componentF18 = "" # Function button
self.valueF18 = 1
self.valueF18Off = 0
self.componentF19 = "" # Function button
self.valueF19 = 1
self.valueF19Off = 0
self.componentF20 = "" # Function button
self.valueF20 = 1
self.valueF20Off = 0
self.componentF21 = "" # Function button
self.valueF21 = 1
self.valueF21Off = 0
self.componentF22 = "" # Function button
self.valueF22 = 1
self.valueF22Off = 0
self.componentF23 = "" # Function button
self.valueF23 = 1
self.valueF23Off = 0
self.componentF24 = "" # Function button
self.valueF24 = 1
self.valueF24Off = 0
self.componentF25 = "" # Function button
self.valueF25 = 1
self.valueF25Off = 0
self.componentF26 = "" # Function button
self.valueF26 = 1
self.valueF26Off = 0
self.componentF27 = "" # Function button
self.valueF27 = 1
self.valueF27Off = 0
self.componentF28 = "" # Function button
self.valueF28 = 1
self.valueF28Off = 0
self.componentF29 = "" # Function button
self.valueF29 = 1
self.valueF29Off = 0
|
ctag/cpe453
|
JMRI/jython/Jynstruments/ThrottleWindowToolBar/USBThrottle.jyn/LogitechCordlessRumblePad2.py
|
Python
|
gpl-2.0
| 5,884
| 0.013936
|
from django.conf import settings
from django.core.management import call_command
from django.db import connection
from django.test.utils import _set_autocommit, TEST_DATABASE_PREFIX
import os, re, sys
def getstatusoutput(cmd):
"A simpler version of getstatusoutput that works on win32 platforms."
stdin, stdout, stderr = os.popen3(cmd)
output = stdout.read()
if output.endswith('\n'): output = output[:-1]
status = stdin.close()
return status, output
def create_lang(db_name, verbosity=1):
"Sets up the pl/pgsql language on the given database."
# Getting the command-line options for the shell command
options = get_cmd_options(db_name)
# Constructing the 'createlang' command.
createlang_cmd = 'createlang %splpgsql' % options
if verbosity >= 1: print createlang_cmd
# Must have database super-user privileges to execute createlang -- it must
# also be in your path.
status, output = getstatusoutput(createlang_cmd)
# Checking the status of the command, 0 => execution successful
if status:
raise Exception("Error executing 'plpgsql' command: %s\n" % output)
def _create_with_cursor(db_name, verbosity=1, autoclobber=False):
"Creates database with psycopg2 cursor."
# Constructing the necessary SQL to create the database (the DATABASE_USER
# must possess the privileges to create a database)
create_sql = 'CREATE DATABASE %s' % connection.ops.quote_name(db_name)
if settings.DATABASE_USER:
create_sql += ' OWNER %s' % settings.DATABASE_USER
cursor = connection.cursor()
_set_autocommit(connection)
try:
# Trying to create the database first.
cursor.execute(create_sql)
#print create_sql
except Exception, e:
# Drop and recreate, if necessary.
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_db(db_name)
if verbosity >= 1: print 'Creating new spatial database...'
cursor.execute(create_sql)
else:
raise Exception('Spatial Database Creation canceled.')
foo = _create_with_cursor
created_regex = re.compile(r'^createdb: database creation failed: ERROR: database ".+" already exists')
def _create_with_shell(db_name, verbosity=1, autoclobber=False):
"""
If no spatial database already exists, then using a cursor will not work.
Thus, a `createdb` command will be issued through the shell to bootstrap
creation of the spatial database.
"""
# Getting the command-line options for the shell command
options = get_cmd_options(False)
create_cmd = 'createdb -O %s %s%s' % (settings.DATABASE_USER, options, db_name)
if verbosity >= 1: print create_cmd
# Attempting to create the database.
status, output = getstatusoutput(create_cmd)
if status:
if created_regex.match(output):
if not autoclobber:
confirm = raw_input("\nIt appears the database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % db_name)
if autoclobber or confirm == 'yes':
if verbosity >= 1: print 'Destroying old spatial database...'
drop_cmd = 'dropdb %s%s' % (options, db_name)
status, output = getstatusoutput(drop_cmd)
if status != 0:
raise Exception('Could not drop database %s: %s' % (db_name, output))
if verbosity >= 1: print 'Creating new spatial database...'
status, output = getstatusoutput(create_cmd)
if status != 0:
raise Exception('Could not create database after dropping: %s' % output)
else:
raise Exception('Spatial Database Creation canceled.')
else:
raise Exception('Unknown error occurred in creating database: %s' % output)
def create_spatial_db(test=False, verbosity=1, autoclobber=False, interactive=False):
"Creates a spatial database based on the settings."
# Making sure we're using PostgreSQL and psycopg2
if settings.DATABASE_ENGINE != 'postgresql_psycopg2':
raise Exception('Spatial database creation only supported on the postgresql_psycopg2 platform.')
# Getting the spatial database name
if test:
db_name = get_spatial_db(test=True)
_create_with_cursor(db_name, verbosity=verbosity, autoclobber=autoclobber)
else:
db_name = get_spatial_db()
_create_with_shell(db_name, verbosity=verbosity, autoclobber=autoclobber)
# Creating the db language, does not need to be done on NT platforms
# since the PostGIS installer enables this capability.
if os.name != 'nt':
create_lang(db_name, verbosity=verbosity)
# Now adding in the PostGIS routines.
load_postgis_sql(db_name, verbosity=verbosity)
if verbosity >= 1: print 'Creation of spatial database %s successful.' % db_name
# Closing the connection
connection.close()
settings.DATABASE_NAME = db_name
# Syncing the database
call_command('syncdb', verbosity=verbosity, interactive=interactive)
def drop_db(db_name=False, test=False):
"""
Drops the given database (defaults to what is returned from
get_spatial_db()). All exceptions are propagated up to the caller.
"""
if not db_name: db_name = get_spatial_db(test=test)
cursor = connection.cursor()
cursor.execute('DROP DATABASE %s' % connection.ops.quote_name(db_name))
def get_cmd_options(db_name):
"Obtains the command-line PostgreSQL connection options for shell commands."
# The db_name parameter is optional
options = ''
if db_name:
options += '-d %s ' % db_name
if settings.DATABASE_USER:
options += '-U %s ' % settings.DATABASE_USER
if settings.DATABASE_HOST:
options += '-h %s ' % settings.DATABASE_HOST
if settings.DATABASE_PORT:
options += '-p %s ' % settings.DATABASE_PORT
return options
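# For illustration (the settings values here are assumptions, not taken from
# this file): with DATABASE_USER='geo', DATABASE_HOST='localhost' and
# DATABASE_PORT='5432', get_cmd_options('gis') returns
# '-d gis -U geo -h localhost -p 5432 ' and get_cmd_options(False) returns
# '-U geo -h localhost -p 5432 '.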
def get_spatial_db(test=False):
"""
Returns the name of the spatial database. The 'test' keyword may be set
to return the test spatial database name.
"""
if test:
if settings.TEST_DATABASE_NAME:
test_db_name = settings.TEST_DATABASE_NAME
else:
test_db_name = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
return test_db_name
else:
if not settings.DATABASE_NAME:
raise Exception('must configure DATABASE_NAME in settings.py')
return settings.DATABASE_NAME
def load_postgis_sql(db_name, verbosity=1):
"""
This routine loads up the PostGIS SQL files lwpostgis.sql and
spatial_ref_sys.sql.
"""
# Getting the path to the PostGIS SQL
try:
# POSTGIS_SQL_PATH may be placed in settings to tell GeoDjango where the
# PostGIS SQL files are located. This is especially useful on Win32
# platforms since the output of pg_config looks like "C:/PROGRA~1/..".
sql_path = settings.POSTGIS_SQL_PATH
except AttributeError:
status, sql_path = getstatusoutput('pg_config --sharedir')
if status:
sql_path = '/usr/local/share'
# The PostGIS SQL post-creation files.
lwpostgis_file = os.path.join(sql_path, 'lwpostgis.sql')
srefsys_file = os.path.join(sql_path, 'spatial_ref_sys.sql')
if not os.path.isfile(lwpostgis_file):
raise Exception('Could not find PostGIS function definitions in %s' % lwpostgis_file)
if not os.path.isfile(srefsys_file):
raise Exception('Could not find PostGIS spatial reference system definitions in %s' % srefsys_file)
# Getting the psql command-line options, and command format.
options = get_cmd_options(db_name)
cmd_fmt = 'psql %s-f "%%s"' % options
# Now trying to load up the PostGIS functions
cmd = cmd_fmt % lwpostgis_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if status:
raise Exception('Error in loading PostGIS lwgeometry routines.')
# Now trying to load up the Spatial Reference System table
cmd = cmd_fmt % srefsys_file
if verbosity >= 1: print cmd
status, output = getstatusoutput(cmd)
if status:
raise Exception('Error in loading PostGIS spatial_ref_sys table.')
# Setting the permissions because on Windows platforms the owner
# of the spatial_ref_sys and geometry_columns tables is always
# the postgres user, regardless of how the db is created.
if os.name == 'nt': set_permissions(db_name)
def set_permissions(db_name):
"""
Sets the permissions on the given database to that of the user specified
in the settings. Needed specifically for PostGIS on Win32 platforms.
"""
cursor = connection.cursor()
user = settings.DATABASE_USER
cursor.execute('ALTER TABLE geometry_columns OWNER TO %s' % user)
cursor.execute('ALTER TABLE spatial_ref_sys OWNER TO %s' % user)
|
paulsmith/geodjango
|
django/contrib/gis/db/backend/postgis/creation.py
|
Python
|
bsd-3-clause
| 9,184
| 0.005989
|
import logging
import unittest
from functools import reduce
from ass_parser import StyleInfo, UsageData
from font_loader import TTFFont, FontInfo, FontLoader, TTCFont, FontWeight
from tests.common import get_file_in_test_directory
class FontLoaderTests(unittest.TestCase):
def test_returns_all_not_found_fonts(self):
loader = FontLoader(None, True)
data = {StyleInfo('Jorvik', 0, False) : UsageData(), StyleInfo('Random font', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(2, len(not_found))
def test_returns_all_found_fonts(self):
loader = FontLoader([get_file_in_test_directory('')], True)
data = {StyleInfo('Jorvik Informal V2', 0, False) : UsageData(), StyleInfo('Random font', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
self.assertIn('Jorvik Informal V2', list(found.values())[0].names)
def test_performs_case_insensitive_search(self):
loader = FontLoader([get_file_in_test_directory('')], True)
data = {StyleInfo('JoRvIk INFormAl v2', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
def test_does_not_add_same_font_twice(self):
loader = FontLoader([get_file_in_test_directory(''), get_file_in_test_directory('')], True)
data = {StyleInfo('Jorvik', 0, False) : UsageData(), StyleInfo('Jorvik informal', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
def test_loads_at_least_some_system_fonts(self):
loader = FontLoader(None, True)
self.assertTrue(len(loader.fonts) > 0)
def test_finds_all_required_fonts(self):
loader = FontLoader(None, True)
loader.fonts.append(FontInfo(['Arial'], False, False, FontWeight.FW_NORMAL, 'random', '1'))
loader.fonts.append(FontInfo(['Arial Black'], False, False, FontWeight.FW_NORMAL, 'random', '2'))
data = {StyleInfo('Arial', 0, False) : UsageData(), StyleInfo('Arial Black', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(2, len(found))
def test_returns_only_appropriate_font(self):
loader = FontLoader(None, True)
loader.fonts.append(FontInfo(['Arial'], False, False, FontWeight.FW_NORMAL, 'random', '1'))
loader.fonts.append(FontInfo(['Arial Black'], False, False, FontWeight.FW_NORMAL, 'random', '2'))
data = {StyleInfo('Arial', 0, False) : UsageData()}
found, not_found = loader.get_fonts_for_list(data)
self.assertEqual(1, len(found))
class TTFFontTests(unittest.TestCase):
def test_ttf_name_matches(self):
font = TTFFont(get_file_in_test_directory('seriously.ttf'))
self.assertIn('Seriously', font.get_info().names)
def test_otf_name_matches(self):
font = TTFFont(get_file_in_test_directory('otfpoc.otf'))
self.assertIn('otfpoc', font.get_info().names)
def test_jorvik_v2_name_matches(self):
font = TTFFont(get_file_in_test_directory('Jorvik.ttf'))
self.assertIn('Jorvik Informal V2', font.get_info().names)
def test_detects_italic_only_font(self):
font = TTFFont(get_file_in_test_directory('CaviarDreams_Italic.ttf'))
self.assertIs(font.get_info().italic, True)
def test_detects_bold_only_font(self):
font = TTFFont(get_file_in_test_directory('Caviar Dreams Bold.ttf'))
self.assertIs(font.get_info().bold, True)
def test_detects_italic_bold_font(self):
font = TTFFont(get_file_in_test_directory('CaviarDreams_BoldItalic.ttf'))
self.assertIs(font.get_info().italic, True)
self.assertIs(font.get_info().bold, True)
def test_parses_fonts_with_platform_id_2_strings(self):
font = TTFFont(get_file_in_test_directory('VANTATHI.TTF'))
self.assertIn('Vanta Thin', font.get_info().names)
def test_parses_fonts_with_utf8_platform_id_0_strings(self):
font = TTFFont(get_file_in_test_directory('SUSANNA_.otf'))
self.assertIn('Susanna', font.get_info().names)
def test_detects_bold_weight(self):
font = TTFFont(get_file_in_test_directory('Caviar Dreams Bold.ttf'))
self.assertEqual(font.get_info().weight, FontWeight.FW_BOLD)
def test_detects_regular_weight(self):
font = TTFFont(get_file_in_test_directory('Jorvik.ttf'))
self.assertEqual(font.get_info().weight, FontWeight.FW_NORMAL)
def test_detects_medium_weight(self):
font = TTFFont(get_file_in_test_directory('seriously.ttf'))
self.assertEqual(font.get_info().weight, FontWeight.FW_MEDIUM)
class TTCFontTests(unittest.TestCase):
def test_contains_all_names(self):
font = TTCFont(get_file_in_test_directory('jorvik_and_seriously.ttc'))
self.assertIn('Seriously', reduce(lambda names, info: names + info.names, font.get_infos(), []))
self.assertIn('Jorvik Informal V2', reduce(lambda names, info: names + info.names, font.get_infos(), []))
class FontInfoTests(unittest.TestCase):
def test_calculates_md5_on_access(self):
info = FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)
self.assertIsNotNone(info.md5)
def test_calculates_correct_md5(self):
info = FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)
self.assertEqual(info.md5, '0dae05c47e919281d7ac1e0170e4d3a8')
def test_caches_md5_in_private_field(self):
info = FontInfo([], False, False, 0, get_file_in_test_directory('Jorvik.ttf'), None)
self.assertIsNone(info._FontInfo__md5)
md5 = info.md5
self.assertIsNotNone(info._FontInfo__md5)
|
tp7/assfc
|
tests/font_parsing_tests.py
|
Python
|
mit
| 5,846
| 0.004961
|
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import cStringIO
import json
import zipfile
import os
from M2Crypto import X509
import constants
class Manifest(object):
"""Class containing relevant data from RHSM manifest."""
SIGNATURE_NAME = "signature"
INNER_ZIP_NAME = "consumer_export.zip"
ENTITLEMENTS_PATH = "export/entitlements"
CERTIFICATE_PATH = "export/extensions"
PRODUCTS_PATH = "export/products"
def __init__(self, zip_path):
self.all_entitlements = []
self.manifest_repos = {}
self.sat5_certificate = None
# Signature and signed data
self.signature = None
self.data = None
# Open manifest from path
top_zip = None
inner_zip = None
inner_file = None
try:
top_zip = zipfile.ZipFile(zip_path, 'r')
# Fetch inner zip file into memory
try:
# inner_file = top_zip.open(zip_path.split('.zip')[0] + '/' + self.INNER_ZIP_NAME)
inner_file = top_zip.open(self.INNER_ZIP_NAME)
self.data = inner_file.read()
inner_file_data = cStringIO.StringIO(self.data)
signature_file = top_zip.open(self.SIGNATURE_NAME)
self.signature = signature_file.read()
# Open the inner zip file
try:
inner_zip = zipfile.ZipFile(inner_file_data)
self._load_entitlements(inner_zip)
self._extract_certificate(inner_zip)
finally:
if inner_zip is not None:
inner_zip.close()
finally:
if inner_file is not None:
inner_file.close()
finally:
if top_zip is not None:
top_zip.close()
def _extract_certificate(self, zip_file):
files = zip_file.namelist()
certificates_names = []
for f in files:
if f.startswith(self.CERTIFICATE_PATH) and f.endswith(".xml"):
certificates_names.append(f)
if len(certificates_names) >= 1:
# take only the first file
cert_file = zip_file.open(certificates_names[0])
self.sat5_certificate = cert_file.read().strip()
cert_file.close()
else:
raise MissingSatelliteCertificateError("Satellite Certificate was not found in manifest.")
def _fill_product_repositories(self, zip_file, product):
product_file = zip_file.open(self.PRODUCTS_PATH + '/' + str(product.get_id()) + '.json')
product_data = json.load(product_file)
product_file.close()
try:
for content in product_data['productContent']:
content = content['content']
product.add_repository(content['label'], content['contentUrl'])
except KeyError:
print("ERROR: Cannot access required field in product '%s'" % product.get_id())
raise
def _load_entitlements(self, zip_file):
files = zip_file.namelist()
entitlements_files = []
for f in files:
if f.startswith(self.ENTITLEMENTS_PATH) and f.endswith(".json"):
entitlements_files.append(f)
if len(entitlements_files) >= 1:
self.all_entitlements = []
for entitlement_file in entitlements_files:
entitlements = zip_file.open(entitlement_file)
# try block in try block - this is a hack for Python 2.4 compatibility
# to support finally
try:
try:
data = json.load(entitlements)
# Extract credentials
certs = data['certificates']
if len(certs) != 1:
raise IncorrectEntitlementsFileFormatError(
"ERROR: Single certificate in entitlements file is expected, found: %d"
% len(certs))
cert = certs[0]
credentials = Credentials(data['id'], cert['cert'], cert['key'])
# Extract product IDs
products = []
provided_products = data['pool']['providedProducts']
for provided_product in provided_products:
product = Product(provided_product['productId'])
self._fill_product_repositories(zip_file, product)
products.append(product)
# Skip entitlements not providing any products
if products:
entitlement = Entitlement(products, credentials)
self.all_entitlements.append(entitlement)
except KeyError:
print("ERROR: Cannot access required field in file '%s'" % entitlement_file)
raise
finally:
entitlements.close()
else:
raise IncorrectEntitlementsFileFormatError(
"ERROR: There has to be at least one entitlements file")
def get_all_entitlements(self):
return self.all_entitlements
def get_satellite_certificate(self):
return self.sat5_certificate
def check_signature(self):
if self.signature and self.data:
certs = os.listdir(constants.CANDLEPIN_CA_CERT_DIR)
# At least one certificate has to match
for cert_name in certs:
cert_file = None
try:
cert_file = open(constants.CANDLEPIN_CA_CERT_DIR + '/' + cert_name, 'r')
cert = X509.load_cert_string(cert_file.read())
except (IOError, X509.X509Error):
continue
finally:
if cert_file is not None:
cert_file.close()
pubkey = cert.get_pubkey()
pubkey.reset_context(md='sha256')
pubkey.verify_init()
pubkey.verify_update(self.data)
if pubkey.verify_final(self.signature):
return True
return False
class Entitlement(object):
def __init__(self, products, credentials):
if products and credentials:
self.products = products
self.credentials = credentials
else:
raise IncorrectEntitlementError()
def get_products(self):
return self.products
def get_credentials(self):
return self.credentials
class Credentials(object):
def __init__(self, identifier, cert, key):
if identifier:
self.id = identifier
else:
raise IncorrectCredentialsError(
"ERROR: ID of credentials has to be defined"
)
if cert and key:
self.cert = cert
self.key = key
else:
raise IncorrectCredentialsError(
"ERROR: Trying to create object with cert = %s and key = %s"
% (cert, key)
)
def get_id(self):
return self.id
def get_cert(self):
return self.cert
def get_key(self):
return self.key
class Product(object):
def __init__(self, identifier):
try:
self.id = int(identifier)
except ValueError:
raise IncorrectProductError(
"ERROR: Invalid product id: %s" % identifier
)
self.repositories = {}
def get_id(self):
return self.id
def get_repositories(self):
return self.repositories
def add_repository(self, label, url):
self.repositories[label] = url
class IncorrectProductError(Exception):
pass
class IncorrectEntitlementError(Exception):
pass
class IncorrectCredentialsError(Exception):
pass
class IncorrectEntitlementsFileFormatError(Exception):
pass
class MissingSatelliteCertificateError(Exception):
pass
class ManifestValidationError(Exception):
pass
|
jhutar/spacewalk
|
backend/cdn_tools/manifest.py
|
Python
|
gpl-2.0
| 8,783
| 0.001139
|
# ed25519.py - Optimized version of the reference implementation of Ed25519
#
# Written in 2011? by Daniel J. Bernstein <djb@cr.yp.to>
# 2013 by Donald Stufft <donald@stufft.io>
# 2013 by Alex Gaynor <alex.gaynor@gmail.com>
# 2013 by Greg Price <price@mit.edu>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
NB: This code is not safe for use with secret keys or secret data.
The only safe use of this code is for verifying signatures on public messages.
Functions for computing the public key of a secret key and for signing
a message are included, namely publickey_unsafe and signature_unsafe,
for testing purposes only.
The root of the problem is that Python's long-integer arithmetic is
not designed for use in cryptography. Specifically, it may take more
or less time to execute an operation depending on the values of the
inputs, and its memory access patterns may also depend on the inputs.
This opens it to timing and cache side-channel attacks which can
disclose data to an attacker. We rely on Python's long-integer
arithmetic, so we cannot handle secrets without risking their disclosure.
"""
import hashlib
import operator
import sys
__version__ = "1.0.dev0"
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
indexbytes = operator.getitem
intlist2bytes = bytes
int2byte = operator.methodcaller("to_bytes", 1, "big")
else:
int2byte = chr
range = xrange
def indexbytes(buf, i):
return ord(buf[i])
def intlist2bytes(l):
return b"".join(chr(c) for c in l)
b = 256
q = 2 ** 255 - 19
l = 2 ** 252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def pow2(x, p):
"""== pow(x, 2**p, q)"""
while p > 0:
x = x * x % q
p -= 1
return x
def inv(z):
"""$= z^{-1} \mod q$, for z != 0"""
# Adapted from curve25519_athlon.c in djb's Curve25519.
z2 = z * z % q # 2
z9 = pow2(z2, 2) * z % q # 9
z11 = z9 * z2 % q # 11
z2_5_0 = (z11 * z11) % q * z9 % q # 31 == 2^5 - 2^0
z2_10_0 = pow2(z2_5_0, 5) * z2_5_0 % q # 2^10 - 2^0
z2_20_0 = pow2(z2_10_0, 10) * z2_10_0 % q # ...
z2_40_0 = pow2(z2_20_0, 20) * z2_20_0 % q
z2_50_0 = pow2(z2_40_0, 10) * z2_10_0 % q
z2_100_0 = pow2(z2_50_0, 50) * z2_50_0 % q
z2_200_0 = pow2(z2_100_0, 100) * z2_100_0 % q
z2_250_0 = pow2(z2_200_0, 50) * z2_50_0 % q # 2^250 - 2^0
return pow2(z2_250_0, 5) * z11 % q # 2^255 - 2^5 + 11 = q - 2
d = -121665 * inv(121666) % q
I = pow(2, (q - 1) // 4, q)
def xrecover(y):
xx = (y * y - 1) * inv(d * y * y + 1)
x = pow(xx, (q + 3) // 8, q)
if (x * x - xx) % q != 0:
x = (x * I) % q
if x % 2 != 0:
x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = (Bx % q, By % q, 1, (Bx * By) % q)
ident = (0, 1, 1, 0)
def edwards_add(P, Q):
# This is formula sequence 'addition-add-2008-hwcd-3' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
(x2, y2, z2, t2) = Q
a = (y1-x1)*(y2-x2) % q
b = (y1+x1)*(y2+x2) % q
c = t1*2*d*t2 % q
dd = z1*2*z2 % q
e = b - a
f = dd - c
g = dd + c
h = b + a
x3 = e*f
y3 = g*h
t3 = e*h
z3 = f*g
return (x3 % q, y3 % q, z3 % q, t3 % q)
def edwards_double(P):
# This is formula sequence 'dbl-2008-hwcd' from
# http://www.hyperelliptic.org/EFD/g1p/auto-twisted-extended-1.html
(x1, y1, z1, t1) = P
a = x1*x1 % q
b = y1*y1 % q
c = 2*z1*z1 % q
# dd = -a
e = ((x1+y1)*(x1+y1) - a - b) % q
g = -a + b # dd + b
f = g - c
h = -a - b # dd - b
x3 = e*f
y3 = g*h
t3 = e*h
z3 = f*g
return (x3 % q, y3 % q, z3 % q, t3 % q)
def scalarmult(P, e):
if e == 0:
return ident
Q = scalarmult(P, e // 2)
Q = edwards_double(Q)
if e & 1:
Q = edwards_add(Q, P)
return Q
# Bpow[i] == scalarmult(B, 2**i)
Bpow = []
def make_Bpow():
P = B
for i in range(253):
Bpow.append(P)
P = edwards_double(P)
make_Bpow()
def scalarmult_B(e):
"""
Implements scalarmult(B, e) more efficiently.
"""
# scalarmult(B, l) is the identity
e = e % l
P = ident
for i in range(253):
if e & 1:
P = edwards_add(P, Bpow[i])
e = e // 2
assert e == 0, e
return P
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
return b''.join([
int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b//8)
])
def encodepoint(P):
(x, y, z, t) = P
zi = inv(z)
x = (x * zi) % q
y = (y * zi) % q
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
return b''.join([
int2byte(sum([bits[i * 8 + j] << j for j in range(8)]))
for i in range(b // 8)
])
def bit(h, i):
return (indexbytes(h, i // 8) >> (i % 8)) & 1
def publickey_unsafe(sk):
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
A = scalarmult_B(a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2 ** i * bit(h, i) for i in range(2 * b))
def signature_unsafe(m, sk, pk):
"""
Not safe to use with secret keys or secret data.
See module docstring. This function should be used for testing only.
"""
h = H(sk)
a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
r = Hint(
intlist2bytes([indexbytes(h, j) for j in range(b // 8, b // 4)]) + m
)
R = scalarmult_B(r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
(x, y, z, t) = P
return (z % q != 0 and
x*y % q == z*t % q and
(y*y - x*x - z*z - d*t*t) % q == 0)
def decodeint(s):
return sum(2 ** i * bit(s, i) for i in range(0, b))
def decodepoint(s):
y = sum(2 ** i * bit(s, i) for i in range(0, b - 1))
x = xrecover(y)
if x & 1 != bit(s, b-1):
x = q - x
P = (x, y, 1, (x*y) % q)
if not isoncurve(P):
raise ValueError("decoding point that is not on curve")
return P
class SignatureMismatch(Exception):
pass
def checkvalid(s, m, pk):
"""
Not safe to use when any argument is secret.
See module docstring. This function should be used only for
verifying public signatures of public messages.
"""
if len(s) != b // 4:
raise ValueError("signature length is wrong")
if len(pk) != b // 8:
raise ValueError("public-key length is wrong")
R = decodepoint(s[:b // 8])
A = decodepoint(pk)
S = decodeint(s[b // 8:b // 4])
h = Hint(encodepoint(R) + pk + m)
(x1, y1, z1, t1) = P = scalarmult_B(S)
(x2, y2, z2, t2) = Q = edwards_add(R, scalarmult(A, h))
if (not isoncurve(P) or not isoncurve(Q) or
(x1*z2 - x2*z1) % q != 0 or (y1*z2 - y2*z1) % q != 0):
raise SignatureMismatch("signature does not pass verification")
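# Illustrative self-check (a sketch added alongside this dump, not part of the
# upstream module). It exercises the "unsafe" helpers that the module docstring
# marks as testing-only, using a throwaway key from os.urandom:
#
#   import os
#   sk = os.urandom(32)
#   pk = publickey_unsafe(sk)
#   sig = signature_unsafe(b"example message", sk, pk)
#   checkvalid(sig, b"example message", pk)  # raises SignatureMismatch on failure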
|
vladimir-v-diaz/securesystemslib
|
securesystemslib/_vendor/ed25519/ed25519.py
|
Python
|
mit
| 7,618
| 0.000656
|
import os
import json
import numpy as np
try:
from numba.pycc import CC
cc = CC('calculate_numba')
except ImportError:
# Will use these as regular Python functions if numba is not present.
class CCSubstitute(object):
# Make a cc.export that doesn't do anything
def export(*args, **kwargs):
def wrapper(func):
return func
return wrapper
cc = CCSubstitute()
@cc.export('ldn_recode_traj', 'i2[:,:](i2[:,:])')
def ldn_recode_traj(x):
# Recode trajectory into deg, stable, imp. Capture trends that are at least
# 95% significant.
#
# Remember that traj is coded as:
# -3: 99% signif decline
# -2: 95% signif decline
# -1: 90% signif decline
# 0: stable
# 1: 90% signif increase
# 2: 95% signif increase
# 3: 99% signif increase
shp = x.shape
x = x.ravel()
x[(x >= -1) & (x <= 1)] = 0
x[(x >= -3) & (x < -1)] = -1
# -1 and 1 are not signif at 95%, so stable
x[(x > 1) & (x <= 3)] = 1
return(np.reshape(x, shp))
@cc.export('ldn_recode_state', 'i2[:,:](i2[:,:])')
def ldn_recode_state(x):
# Recode state into deg, stable, imp. Note the >= -10 is so no data
# isn't coded as degradation. More than two changes in class is defined
# as degradation in state.
shp = x.shape
x = x.ravel()
x[(x > -2) & (x < 2)] = 0
x[(x >= -10) & (x <= -2)] = -1
x[x >= 2] = 1
return(np.reshape(x, shp))
@cc.export('ldn_make_prod5', 'i2[:,:](i2[:,:], i2[:,:], i2[:,:] ,i2[:,:])')
def ldn_make_prod5(traj, state, perf, mask):
# Coding of LPD (prod5)
# 1: declining
# 2: early signs of decline
# 3: stable but stressed
# 4: stable
# 5: improving
# -32768: no data
# Declining = 1
shp = traj.shape
traj = traj.ravel()
state = state.ravel()
perf = perf.ravel()
mask = mask.ravel()
x = traj.copy()
x[traj == -1] = 1
# Stable = 4
x[traj == 0] = 4
# Improving = 5
x[traj == 1] = 5
# Stable due to agreement in perf and state but positive trajectory
x[(traj == 1) & (state == -1) & (perf == -1)] = 4
# Stable but stressed
x[(traj == 0) & (state == 0) & (perf == -1)] = 3
# Early signs of decline
x[(traj == 0) & (state == -1) & (perf == 0)] = 2
# Ensure NAs carry over to productivity indicator layer
x[(traj == -32768) | (perf == -32768) | (state == -32768)] = -32768
# Ensure masked areas carry over to productivity indicator
x[mask == -32767] = -32767
return(np.reshape(x, shp))
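# Worked example of the coding above (an illustration, not from the original
# module): a pixel with traj == 0, state == -1 and perf == 0 is first set to 4
# ("stable") by the trajectory rule, then reclassified to 2 ("early signs of
# decline") by the combined rule, provided it is neither nodata nor masked.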
@cc.export('ldn_total_by_trans', '(f4[:,:], i2[:,:], f4[:,:])')
def ldn_total_by_trans(d, trans_a, cell_areas):
"""Calculates a total table for an array"""
d = d.ravel()
trans_a = trans_a.ravel()
trans = np.unique(trans_a)
cell_areas = cell_areas.ravel()
# Values less than zero are missing data flags
d[d < 0] = 0
totals = np.zeros(trans.size, dtype=np.float32)
for i in range(trans.size):
# Only sum values for this_trans, and where soc has a valid value
# (negative values are missing data flags)
vals = d[trans_a == trans[i]] * cell_areas[trans_a == trans[i]]
totals[i] += np.sum(vals)
return trans, totals
# @cc.export('ldn_total_by_trans_merge', '(f4[:], i2[:], f4[:], i2[:])')
# def ldn_total_by_trans_merge(total1, trans1, total2, trans2):
# """Calculates a total table for an array"""
# # Combine past totals with these totals
# trans = np.unique(np.concatenate((trans1, trans2)))
# totals = np.zeros(trans.size, dtype=np.float32)
# for i in range(trans.size):
# trans1_loc = np.where(trans1 == trans[i])[0]
# trans2_loc = np.where(trans2 == trans[i])[0]
# if trans1_loc.size > 0:
# totals[i] = totals[i] + total1[trans1_loc[0]]
# if trans2_loc.size > 0:
# totals[i] = totals[i] + total2[trans2_loc[0]]
# return trans, totals
@cc.export('ldn_total_deg', 'f4[4](i2[:,:], b1[:,:], f4[:,:])')
def ldn_total_deg(x, water, cell_areas):
"""Calculates a total table for an array"""
x = x.ravel()
cell_areas = cell_areas.ravel()
x[water.ravel()] = -32767
out = np.zeros((4), dtype=np.float32)
out[0] = np.sum(cell_areas[x == 1])
out[1] = np.sum(cell_areas[x == 0])
out[2] = np.sum(cell_areas[x == -1])
out[3] = np.sum(cell_areas[x == -32768])
return out
if __name__ == "__main__":
cc.compile()
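# Illustrative usage sketch (not part of the original module): whether or not
# numba is present, the exported functions remain callable as plain Python on
# small int16 arrays. Note the copy(), since they modify their input in place.
#
#   import numpy as np
#   traj = np.array([[-3, -1, 0, 2]], dtype=np.int16)
#   ldn_recode_traj(traj.copy())   # -> array([[-1, 0, 0, 1]], dtype=int16)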
|
ConservationInternational/ldmp-qgis-plugin
|
LDMP/calculate_numba.py
|
Python
|
gpl-2.0
| 4,469
| 0.000671
|
#!/usr/bin/env python
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample Google App Engine application that demonstrates how to send mail using
Mailgun.
For more information, see README.md.
"""
from urllib import urlencode
import httplib2
import webapp2
# Your Mailgun Domain Name
MAILGUN_DOMAIN_NAME = 'isrealconsulting.com'
# Your Mailgun API key
MAILGUN_API_KEY = 'key-1ffd59a9c3afdcf762f22b21129c13f6'
# [START simple_message]
def send_simple_message(recipient):
http = httplib2.Http()
http.add_credentials('api', MAILGUN_API_KEY)
url = 'https://api.mailgun.net/v3/{}/messages'.format(MAILGUN_DOMAIN_NAME)
data = {
'from': 'Isreal Consulting Webmaster <webmaster@{}>'.format(MAILGUN_DOMAIN_NAME),
'to': recipient,
'subject': 'This is an example email from ICLLC code site codepy',
'text': 'Test message from codepy-1'
}
resp, content = http.request(url, 'POST', urlencode(data))
if resp.status != 200:
raise RuntimeError(
'Mailgun API error: {} {}'.format(resp.status, content))
# [END simple_message]
# [START complex_message]
def send_complex_message(recipient):
http = httplib2.Http()
http.add_credentials('api', MAILGUN_API_KEY)
url = 'https://api.mailgun.net/v3/{}/messages'.format(MAILGUN_DOMAIN_NAME)
data = {
'from': 'Isreal Consulting Webmaster <webmaster@{}>'.format(MAILGUN_DOMAIN_NAME),
'to': recipient,
'subject': 'This is an example email from ICLLC code site codepy',
'text': 'Test message from codepy-1',
'html': '<html>HTML <strong>version</strong> of the body</html>'
}
resp, content = http.request(url, 'POST', urlencode(data))
if resp.status != 200:
raise RuntimeError(
'Mailgun API error: {} {}'.format(resp.status, content))
# [END complex_message]
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.content_type = 'text/html'
self.response.write("""
<!doctype html>
<html><head><title>Isreal Consulting</title></head>
<body>
<form method="POST">
<input type="text" name="recipient" placeholder="Enter recipient email">
<input type="submit" name="submit" value="Send simple email">
<input type="submit" name="submit" value="Send complex email">
</form>
</body></html>
""")
def post(self):
recipient = self.request.get('recipient')
action = self.request.get('submit')
if action == 'Send simple email':
send_simple_message(recipient)
else:
send_complex_message(recipient)
self.response.write('Mail sent')
app = webapp2.WSGIApplication([
('/', MainPage)
], debug=True)
|
isrealconsulting/codepy27
|
main.py
|
Python
|
apache-2.0
| 3,227
| 0.00093
|
"""
Legalese
--------
Copyright (c) 2015, 2016 Genome Research Ltd.
Author: Colin Nolan <cn13@sanger.ac.uk>
This file is part of Cookie Monster.
Cookie Monster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from unittest.mock import MagicMock
from hgicommon.mixable import Priority
from cookiemonster.cookiejar import CookieJar
from cookiemonster.cookiejar.in_memory_cookiejar import InMemoryCookieJar
from cookiemonster.processor.models import Rule
def create_mock_rule(priority: int=Priority.MIN_PRIORITY) -> Rule:
"""
Creates a mock `Rule` object.
:param priority: (optional) the priority of the rule
:return: the created rule
"""
return Rule(
lambda file_update, data_environment: True,
lambda file_update, data_environment: True,
"my_rule",
priority=priority
)
def create_magic_mock_cookie_jar() -> CookieJar:
"""
Creates a magic mock CookieJar: it has the behaviour of an in-memory CookieJar,
but its methods are wrapped in MagicMocks and therefore their usage is recorded.
:return: the created magic mock
"""
cookie_jar = InMemoryCookieJar()
original_get_next_for_processing = cookie_jar.get_next_for_processing
original_enrich_cookie = cookie_jar.enrich_cookie
original_mark_as_failed = cookie_jar.mark_as_failed
original_mark_as_completed = cookie_jar.mark_as_complete
original_mark_as_reprocess = cookie_jar.mark_for_processing
cookie_jar.get_next_for_processing = MagicMock(side_effect=original_get_next_for_processing)
cookie_jar.enrich_cookie = MagicMock(side_effect=original_enrich_cookie)
cookie_jar.mark_as_failed = MagicMock(side_effect=original_mark_as_failed)
cookie_jar.mark_as_complete = MagicMock(side_effect=original_mark_as_completed)
cookie_jar.mark_for_processing = MagicMock(side_effect=original_mark_as_reprocess)
return cookie_jar
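# Illustrative usage sketch (the cookie identifier below is made up, not taken
# from the original test suite): the mock jar still behaves like an in-memory
# CookieJar while recording every call.
#
#   jar = create_magic_mock_cookie_jar()
#   jar.mark_for_processing("/my/cookie")
#   jar.mark_for_processing.assert_called_once_with("/my/cookie")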
|
wtsi-hgi/cookie-monster
|
cookiemonster/tests/processor/_mocks.py
|
Python
|
gpl-3.0
| 2,448
| 0.002451
|