| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/apigee/environment.py
|
1
|
8322
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.apigee import environment_pb2
from google3.cloud.graphite.mmv2.services.google.apigee import environment_pb2_grpc
from typing import List
class Environment(object):
def __init__(
self,
name: str = None,
description: str = None,
created_at: int = None,
last_modified_at: int = None,
properties: dict = None,
display_name: str = None,
state: str = None,
organization: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.description = description
self.properties = properties
self.display_name = display_name
self.organization = organization
self.service_account_file = service_account_file
def apply(self):
stub = environment_pb2_grpc.ApigeeEnvironmentServiceStub(channel.Channel())
request = environment_pb2.ApplyApigeeEnvironmentRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if EnvironmentProperties.to_proto(self.properties):
request.resource.properties.CopyFrom(
EnvironmentProperties.to_proto(self.properties)
)
else:
request.resource.ClearField("properties")
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.organization):
request.resource.organization = Primitive.to_proto(self.organization)
request.service_account_file = self.service_account_file
response = stub.ApplyApigeeEnvironment(request)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.created_at = Primitive.from_proto(response.created_at)
self.last_modified_at = Primitive.from_proto(response.last_modified_at)
self.properties = EnvironmentProperties.from_proto(response.properties)
self.display_name = Primitive.from_proto(response.display_name)
self.state = EnvironmentStateEnum.from_proto(response.state)
self.organization = Primitive.from_proto(response.organization)
def delete(self):
stub = environment_pb2_grpc.ApigeeEnvironmentServiceStub(channel.Channel())
request = environment_pb2.DeleteApigeeEnvironmentRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if EnvironmentProperties.to_proto(self.properties):
request.resource.properties.CopyFrom(
EnvironmentProperties.to_proto(self.properties)
)
else:
request.resource.ClearField("properties")
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.organization):
request.resource.organization = Primitive.to_proto(self.organization)
response = stub.DeleteApigeeEnvironment(request)
@classmethod
def list(self, organization, service_account_file=""):
stub = environment_pb2_grpc.ApigeeEnvironmentServiceStub(channel.Channel())
request = environment_pb2.ListApigeeEnvironmentRequest()
request.service_account_file = service_account_file
request.Organization = organization
return stub.ListApigeeEnvironment(request).items
def to_proto(self):
resource = environment_pb2.ApigeeEnvironment()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if EnvironmentProperties.to_proto(self.properties):
resource.properties.CopyFrom(
EnvironmentProperties.to_proto(self.properties)
)
else:
resource.ClearField("properties")
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.organization):
resource.organization = Primitive.to_proto(self.organization)
return resource
class EnvironmentProperties(object):
def __init__(self, property: list = None):
self.property = property
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ApigeeEnvironmentProperties()
if EnvironmentPropertiesPropertyArray.to_proto(resource.property):
res.property.extend(
EnvironmentPropertiesPropertyArray.to_proto(resource.property)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentProperties(
property=EnvironmentPropertiesPropertyArray.from_proto(resource.property),
)
class EnvironmentPropertiesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentProperties.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentProperties.from_proto(i) for i in resources]
class EnvironmentPropertiesProperty(object):
def __init__(self, name: str = None, value: str = None):
self.name = name
self.value = value
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = environment_pb2.ApigeeEnvironmentPropertiesProperty()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.value):
res.value = Primitive.to_proto(resource.value)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EnvironmentPropertiesProperty(
name=Primitive.from_proto(resource.name),
value=Primitive.from_proto(resource.value),
)
class EnvironmentPropertiesPropertyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EnvironmentPropertiesProperty.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EnvironmentPropertiesProperty.from_proto(i) for i in resources]
class EnvironmentStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return environment_pb2.ApigeeEnvironmentStateEnum.Value(
"ApigeeEnvironmentStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return environment_pb2.ApigeeEnvironmentStateEnum.Name(resource)[
len("ApigeeEnvironmentStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
|
apache-2.0
| -8,753,835,329,695,119,000
| 34.716738
| 86
| 0.66883
| false
| 4.302999
| false
| false
| false
|
igorcoding/asynctnt
|
asynctnt/exceptions.py
|
1
|
4098
|
import enum
class TarantoolError(Exception):
"""
Base Tarantool Exception class
"""
pass
class TarantoolSchemaError(TarantoolError):
"""
    Exception is raised when any problem with the schema occurs
"""
pass
class TarantoolDatabaseError(TarantoolError):
"""
Exception is raised when Tarantool responds with code != 0
"""
def __init__(self, code, message):
super(TarantoolDatabaseError, self).__init__(code, message)
self.code = code
self.message = message
class TarantoolNetworkError(TarantoolError):
pass
class TarantoolNotConnectedError(TarantoolNetworkError):
"""
Raised when asynctnt is not connected to Tarantool
"""
pass
class ErrorCode(enum.IntEnum):
"""
Tarantool default error codes
"""
ER_UNKNOWN = 0
ER_ILLEGAL_PARAMS = 1
ER_MEMORY_ISSUE = 2
ER_TUPLE_FOUND = 3
ER_TUPLE_NOT_FOUND = 4
ER_UNSUPPORTED = 5
ER_NONMASTER = 6
ER_READONLY = 7
ER_INJECTION = 8
ER_CREATE_SPACE = 9
ER_SPACE_EXISTS = 10
ER_DROP_SPACE = 11
ER_ALTER_SPACE = 12
ER_INDEX_TYPE = 13
ER_MODIFY_INDEX = 14
ER_LAST_DROP = 15
ER_TUPLE_FORMAT_LIMIT = 16
ER_DROP_PRIMARY_KEY = 17
ER_KEY_PART_TYPE = 18
ER_EXACT_MATCH = 19
ER_INVALID_MSGPACK = 20
ER_PROC_RET = 21
ER_TUPLE_NOT_ARRAY = 22
ER_FIELD_TYPE = 23
ER_FIELD_TYPE_MISMATCH = 24
ER_SPLICE = 25
ER_ARG_TYPE = 26
ER_TUPLE_IS_TOO_LONG = 27
ER_UNKNOWN_UPDATE_OP = 28
ER_UPDATE_FIELD = 29
ER_FIBER_STACK = 30
ER_KEY_PART_COUNT = 31
ER_PROC_LUA = 32
ER_NO_SUCH_PROC = 33
ER_NO_SUCH_TRIGGER = 34
ER_NO_SUCH_INDEX = 35
ER_NO_SUCH_SPACE = 36
ER_NO_SUCH_FIELD = 37
ER_EXACT_FIELD_COUNT = 38
ER_INDEX_FIELD_COUNT = 39
ER_WAL_IO = 40
ER_MORE_THAN_ONE_TUPLE = 41
ER_ACCESS_DENIED = 42
ER_CREATE_USER = 43
ER_DROP_USER = 44
ER_NO_SUCH_USER = 45
ER_USER_EXISTS = 46
ER_PASSWORD_MISMATCH = 47
ER_UNKNOWN_REQUEST_TYPE = 48
ER_UNKNOWN_SCHEMA_OBJECT = 49
ER_CREATE_FUNCTION = 50
ER_NO_SUCH_FUNCTION = 51
ER_FUNCTION_EXISTS = 52
ER_FUNCTION_ACCESS_DENIED = 53
ER_FUNCTION_MAX = 54
ER_SPACE_ACCESS_DENIED = 55
ER_USER_MAX = 56
ER_NO_SUCH_ENGINE = 57
ER_RELOAD_CFG = 58
ER_CFG = 59
ER_VINYL = 60
ER_LOCAL_SERVER_IS_NOT_ACTIVE = 61
ER_UNKNOWN_SERVER = 62
ER_CLUSTER_ID_MISMATCH = 63
ER_INVALID_UUID = 64
ER_CLUSTER_ID_IS_RO = 65
ER_SERVER_ID_MISMATCH = 66
ER_SERVER_ID_IS_RESERVED = 67
ER_INVALID_ORDER = 68
ER_MISSING_REQUEST_FIELD = 69
ER_IDENTIFIER = 70
ER_DROP_FUNCTION = 71
ER_ITERATOR_TYPE = 72
ER_REPLICA_MAX = 73
ER_INVALID_XLOG = 74
ER_INVALID_XLOG_NAME = 75
ER_INVALID_XLOG_ORDER = 76
ER_NO_CONNECTION = 77
ER_TIMEOUT = 78
ER_ACTIVE_TRANSACTION = 79
ER_NO_ACTIVE_TRANSACTION = 80
ER_CROSS_ENGINE_TRANSACTION = 81
ER_NO_SUCH_ROLE = 82
ER_ROLE_EXISTS = 83
ER_CREATE_ROLE = 84
ER_INDEX_EXISTS = 85
ER_TUPLE_REF_OVERFLOW = 86
ER_ROLE_LOOP = 87
ER_GRANT = 88
ER_PRIV_GRANTED = 89
ER_ROLE_GRANTED = 90
ER_PRIV_NOT_GRANTED = 91
ER_ROLE_NOT_GRANTED = 92
ER_MISSING_SNAPSHOT = 93
ER_CANT_UPDATE_PRIMARY_KEY = 94
ER_UPDATE_INTEGER_OVERFLOW = 95
ER_GUEST_USER_PASSWORD = 96
ER_TRANSACTION_CONFLICT = 97
ER_UNSUPPORTED_ROLE_PRIV = 98
ER_LOAD_FUNCTION = 99
ER_FUNCTION_LANGUAGE = 100
ER_RTREE_RECT = 101
ER_PROC_C = 102
ER_UNKNOWN_RTREE_INDEX_DISTANCE_TYPE = 103
ER_PROTOCOL = 104
ER_UPSERT_UNIQUE_SECONDARY_KEY = 105
ER_WRONG_INDEX_RECORD = 106
ER_WRONG_INDEX_PARTS = 107
ER_WRONG_INDEX_OPTIONS = 108
ER_WRONG_SCHEMA_VERSION = 109
ER_SLAB_ALLOC_MAX = 110
ER_WRONG_SPACE_OPTIONS = 111
ER_UNSUPPORTED_INDEX_FEATURE = 112
ER_VIEW_IS_RO = 113
ER_SERVER_UUID_MISMATCH = 114
ER_SYSTEM = 115
ER_LOADING = 116
ER_CONNECTION_TO_SELF = 117
ER_KEY_PART_IS_TOO_LONG = 118
ER_COMPRESSION = 119
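# Illustrative usage (not part of the original module): callers typically match
# on the numeric code carried by TarantoolDatabaseError, for example:
#
#     try:
#         ...  # some request against Tarantool
#     except TarantoolDatabaseError as e:
#         if e.code == ErrorCode.ER_TUPLE_NOT_FOUND:
#             ...  # handle the missing tuple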
|
apache-2.0
| 3,441,161,504,051,915,000
| 24.296296
| 67
| 0.624451
| false
| 2.787755
| false
| false
| false
|
wxdwfc/security_lab
|
exploit-template.py
|
1
|
1889
|
#!/usr/bin/python
import sys
import socket
import traceback
import urllib
import struct
####
## You might find it useful to define variables that store various
## stack or function addresses from the zookd / zookfs processes,
## which you can then use in build_exploit(); the following are just
## examples.
stack_buffer = 0x34567890
stack_saved_ebp = 0x12345678
stack_retaddr = stack_saved_ebp + 4
## This is the function that you should modify to construct an
## HTTP request that will cause a buffer overflow in some part
## of the zookws web server and exploit it.
def build_exploit(shellcode):
## Things that you might find useful in constructing your exploit:
## urllib.quote(s)
## returns string s with "special" characters percent-encoded
## struct.pack("<I", x)
## returns the 4-byte binary encoding of the 32-bit integer x
## variables for program addresses (ebp, buffer, retaddr=ebp+4)
req = "GET /test HTTP/1.0\r\n" + \
"\r\n"
return req
####
def send_req(host, port, req):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("Connecting to %s:%d..." % (host, port))
sock.connect((host, port))
print("Connected, sending request...")
sock.send(req)
print("Request sent, waiting for reply...")
rbuf = sock.recv(1024)
resp = ""
while len(rbuf):
resp = resp + rbuf
rbuf = sock.recv(1024)
print("Received reply.")
sock.close()
return resp
####
if len(sys.argv) != 3:
print("Usage: " + sys.argv[0] + " host port")
exit()
try:
shellfile = open("shellcode.bin", "r")
shellcode = shellfile.read()
req = build_exploit(shellcode)
print("HTTP request:")
print(req)
resp = send_req(sys.argv[1], int(sys.argv[2]), req)
print("HTTP response:")
print(resp)
except:
print("Exception:")
print(traceback.format_exc())
|
mit
| 4,728,255,385,788,143,000
| 24.186667
| 70
| 0.651668
| false
| 3.415913
| false
| false
| false
|
dochevviktor/coding_challenges
|
DFCL2-KeyFinder.py
|
1
|
2118
|
import time
import sys
"""
The goal here is to decrypt the cipher by brute force
Using bytes seemed to improve speed to a certain degree
"""
cipher =[0x5499fa99, 0x1ee7d8da, 0x5df0b78b, 0x1cb0c18c, 0x10f09fc5, 0x4bb7fdae, 0x7fcb95ac,
0xe494fbae, 0x8f5d90a3, 0xc766fdd7, 0xb7399ecc, 0xbf4af592, 0xf35c9dc2, 0x272be2a4,
0x5e788697, 0x520febd8, 0x468c808c, 0x2e550ac9, 0x2b4d28b7, 0x4c166789, 0x33df0bec,
0x67a96778, 0x0ffa0ce3, 0x44cd2a9a, 0x2dc208dc, 0x35c26a9d, 0x658b0fd7, 0x0d006482,
0x46c90cf8, 0x28d72a79, 0x4ea94be5, 0x1bbc6995, 0x478505d3, 0x7b1a6b8d, 0xaf7408db,
0xef7d7f9f, 0x76471cc6, 0xef1076b4, 0x6c911aa7, 0xe75a7ed3, 0x89630c8d, 0xf32b7fcb,
0x697c1e89, 0x091c30be, 0x736a4cbf, 0xe27339bb, 0x9a2a52a2]
text = [""]*46
try:
i1 = int(sys.argv[1])
except:
print ("A number from 0 to 3 needs to be specified as an argument (use DFCL2-Pypy Launcher.bat)!")
sys.exit()
answer = ""
test = "2074686520" # = (space)the(space)
flag = 0x0
# Deny Range (for now its easier to check, mmk ?)
deny = {"0"}
for i in xrange(0,31):
deny.add(hex(i).lstrip("0x"))
for i in xrange(121,256):
deny.add(hex(i).lstrip("0x"))
deny = frozenset(deny)
program_starts = time.time()
# 0x2710 = 10000
# 0x7530 = 30000
# 0xc350 = 50000
iter_print = 0xc350 + i1
while i1 < 0xFFFFFFFF:
if i1 % iter_print == 0:
#every n'th iteration, print progress and speed
now = time.time()
print("%.2f" % (((float(i1)+0.000001)/0xFFFFFFFF)*0x64))+"% - ",("%.2f" % (now - program_starts)) , "ms"
program_starts = time.time()
for i in xrange(0x2e,0x0,-0x1):
a = hex(((cipher[i-1] + i1) % 0x100000000)^cipher[i])[0x2:0xa]
# This will reject most keys that produce forbidden characters, but not all
if a in deny:
break
flag = 0x1
text[i-0x1] = a
if flag == 0x1:
if test in "".join(text):
f = open('test.txt', 'a')
f.write(str(i1)+"\n")
f.close()
print "Possible Keys are: "+str(i1)
flag << 0x4
i1+=4
print answer
|
mit
| 8,956,394,434,768,525,000
| 33.16129
| 112
| 0.629367
| false
| 2.371781
| false
| false
| false
|
eriklindernoren/Keras-GAN
|
pixelda/data_loader.py
|
1
|
3259
|
import scipy
from glob import glob
import numpy as np
from keras.datasets import mnist
from skimage.transform import resize as imresize
import pickle
import os
import urllib.request  # imported explicitly so urllib.request.urlopen below works
import gzip
class DataLoader():
"""Loads images from MNIST (domain A) and MNIST-M (domain B)"""
def __init__(self, img_res=(128, 128)):
self.img_res = img_res
self.mnistm_url = 'https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz'
self.setup_mnist(img_res)
self.setup_mnistm(img_res)
def normalize(self, images):
return images.astype(np.float32) / 127.5 - 1.
def setup_mnist(self, img_res):
print ("Setting up MNIST...")
if not os.path.exists('datasets/mnist_x.npy'):
# Load the dataset
(mnist_X, mnist_y), (_, _) = mnist.load_data()
# Normalize and rescale images
mnist_X = self.normalize(mnist_X)
mnist_X = np.array([imresize(x, img_res) for x in mnist_X])
mnist_X = np.expand_dims(mnist_X, axis=-1)
mnist_X = np.repeat(mnist_X, 3, axis=-1)
self.mnist_X, self.mnist_y = mnist_X, mnist_y
# Save formatted images
np.save('datasets/mnist_x.npy', self.mnist_X)
np.save('datasets/mnist_y.npy', self.mnist_y)
else:
self.mnist_X = np.load('datasets/mnist_x.npy')
self.mnist_y = np.load('datasets/mnist_y.npy')
print ("+ Done.")
def setup_mnistm(self, img_res):
print ("Setting up MNIST-M...")
if not os.path.exists('datasets/mnistm_x.npy'):
# Download the MNIST-M pkl file
filepath = 'datasets/keras_mnistm.pkl.gz'
if not os.path.exists(filepath.replace('.gz', '')):
print('+ Downloading ' + self.mnistm_url)
data = urllib.request.urlopen(self.mnistm_url)
with open(filepath, 'wb') as f:
f.write(data.read())
with open(filepath.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(filepath) as zip_f:
out_f.write(zip_f.read())
os.unlink(filepath)
# load MNIST-M images from pkl file
with open('datasets/keras_mnistm.pkl', "rb") as f:
data = pickle.load(f, encoding='bytes')
# Normalize and rescale images
mnistm_X = np.array(data[b'train'])
mnistm_X = self.normalize(mnistm_X)
mnistm_X = np.array([imresize(x, img_res) for x in mnistm_X])
self.mnistm_X, self.mnistm_y = mnistm_X, self.mnist_y.copy()
# Save formatted images
np.save('datasets/mnistm_x.npy', self.mnistm_X)
np.save('datasets/mnistm_y.npy', self.mnistm_y)
else:
self.mnistm_X = np.load('datasets/mnistm_x.npy')
self.mnistm_y = np.load('datasets/mnistm_y.npy')
print ("+ Done.")
def load_data(self, domain, batch_size=1):
X = self.mnist_X if domain == 'A' else self.mnistm_X
y = self.mnist_y if domain == 'A' else self.mnistm_y
idx = np.random.choice(list(range(len(X))), size=batch_size)
return X[idx], y[idx]
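# Illustrative usage (not part of the original file; assumes a local
# 'datasets/' directory exists for the cached .npy files):
#
#     loader = DataLoader(img_res=(32, 32))
#     imgs_A, labels_A = loader.load_data(domain='A', batch_size=16)  # MNIST
#     imgs_B, labels_B = loader.load_data(domain='B', batch_size=16)  # MNIST-M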
|
mit
| -2,877,848,713,173,794,000
| 33.305263
| 115
| 0.562136
| false
| 3.359794
| false
| false
| false
|
ActiveState/code
|
recipes/Python/498106_Convert_formulstring_implied_multiplicatiproper_/recipe-498106.py
|
1
|
1913
|
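# Note (added for clarity): the recipe raises FormulaError below but never
# defines it; a minimal definition is assumed here so the code runs as-is.
class FormulaError(Exception):
    pass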
def toProperFormula(s):
"""
    Given a formula string, returns a modified formula with missing
    multiplication symbols inserted and grouping symbols [] and {} replaced by parentheses.
Only primitive error checking for mismatched grouping symbols is shown in
this recipe.
author: ernesto.adorio@gmail.com, ernie@extremecomputing.org
"""
import tokenize
from cStringIO import StringIO
f = StringIO(s)
# Make variables mutable to child function.
formula = [""]
prevtoken = [""]
prevsymbol = [""]
closers = []
def handle_token(type, token, (srow, scol), (erow, ecol), line):
token = str(token)
symbol = tokenize.tok_name[type]
if symbol == "OP":
if token == ")":
if closers.pop() != "(": raise FormulaError('Error: "' +line[:ecol] + '" unbalanced ).')
elif token == "]":
if closers.pop() != "[": raise FormulaError('Error: "' +line[:ecol] + '" unbalanced ].')
token = ")"
elif token == "}":
if closers.pop() != "{": raise FormulaError('Error: "' +line[:ecol] + '" unbalanced }.')
token = ")"
elif token in ["(", "[", "{"]:
closers.append(token)
if prevtoken[0] == ")" or prevsymbol[0] == "NUMBER":
formula[0] += "*"
token = "("
elif symbol in ["NAME", "NUMBER"]:
if prevtoken[0] == ")" or prevsymbol[0] in ["NAME", "NUMBER"]:
formula[0] += "*"
formula[0] += token
prevtoken[0] = token
prevsymbol[0] = symbol
tokenize.tokenize(f.readline, handle_token)
return formula[0]
print toProperFormula("2 ( 23.45x - 4y) [34 - 5 x] + w^[atan2(2y, 4x)] 5")
"""
2*(23.45*x-4*y)*(34-5*x)+w^(atan2(2*y,4*x))*5
"""
|
mit
| -3,109,650,680,688,000,000
| 32.561404
| 106
| 0.500261
| false
| 3.707364
| false
| false
| false
|
mattilyra/gensim
|
gensim/downloader.py
|
1
|
14126
|
"""
This module is an API for downloading, getting information and loading datasets/models.
Get information about available models/datasets:
>>> import gensim.downloader as api
>>>
>>> api.info() # return dict with info about available models/datasets
>>> api.info("text8") # return dict with info about "text8" dataset
Model example:
>>> import gensim.downloader as api
>>>
>>> model = api.load("glove-twitter-25") # load glove vectors
>>> model.most_similar("cat")  # show words similar to 'cat'
Dataset example:
>>> import gensim.downloader as api
>>> from gensim.models import Word2Vec
>>>
>>> dataset = api.load("text8") # load dataset as iterable
>>> model = Word2Vec(dataset) # train w2v model
Also, this API is available via the CLI::
python -m gensim.downloader --info <dataname> # same as api.info(dataname)
python -m gensim.downloader --download <dataname> # same as api.load(dataname, return_path=True)
"""
from __future__ import absolute_import
import argparse
import os
import json
import logging
import sys
import errno
import hashlib
import math
import shutil
import tempfile
from functools import partial
if sys.version_info[0] == 2:
import urllib
from urllib2 import urlopen
else:
import urllib.request as urllib
from urllib.request import urlopen
user_dir = os.path.expanduser('~')
base_dir = os.path.join(user_dir, 'gensim-data')
logger = logging.getLogger('gensim.api')
DATA_LIST_URL = "https://raw.githubusercontent.com/RaRe-Technologies/gensim-data/master/list.json"
DOWNLOAD_BASE_URL = "https://github.com/RaRe-Technologies/gensim-data/releases/download"
def _progress(chunks_downloaded, chunk_size, total_size, part=1, total_parts=1):
"""Reporthook for :func:`urllib.urlretrieve`, code from [1]_.
Parameters
----------
chunks_downloaded : int
Number of chunks of data that have been downloaded.
chunk_size : int
Size of each chunk of data.
total_size : int
Total size of the dataset/model.
part : int, optional
Number of current part, used only if `no_parts` > 1.
total_parts : int, optional
Total number of parts.
References
----------
[1] https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
"""
bar_len = 50
size_downloaded = float(chunks_downloaded * chunk_size)
filled_len = int(math.floor((bar_len * size_downloaded) / total_size))
percent_downloaded = round(((size_downloaded * 100) / total_size), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
if total_parts == 1:
sys.stdout.write(
'\r[%s] %s%s %s/%sMB downloaded' % (
bar, percent_downloaded, "%",
round(size_downloaded / (1024 * 1024), 1),
round(float(total_size) / (1024 * 1024), 1))
)
sys.stdout.flush()
else:
sys.stdout.write(
'\r Part %s/%s [%s] %s%s %s/%sMB downloaded' % (
part + 1, total_parts, bar, percent_downloaded, "%",
round(size_downloaded / (1024 * 1024), 1),
round(float(total_size) / (1024 * 1024), 1))
)
sys.stdout.flush()
def _create_base_dir():
"""Create the gensim-data directory in home directory, if it has not been already created.
Raises
------
Exception
An exception is raised when read/write permissions are not available or a file named gensim-data
already exists in the home directory.
"""
if not os.path.isdir(base_dir):
try:
logger.info("Creating %s", base_dir)
os.makedirs(base_dir)
except OSError as e:
if e.errno == errno.EEXIST:
raise Exception(
"Not able to create folder gensim-data in {}. File gensim-data "
"exists in the direcory already.".format(user_dir)
)
else:
raise Exception(
"Can't create {}. Make sure you have the read/write permissions "
"to the directory or you can try creating the folder manually"
.format(base_dir)
)
def _calculate_md5_checksum(fname):
"""Calculate the checksum of the file, exactly same as md5-sum linux util.
Parameters
----------
fname : str
Path to the file.
Returns
-------
str
        MD5-hash of the file named `fname`.
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def info(name=None, show_only_latest=True):
"""Provide the information related to model/dataset.
Parameters
----------
name : str, optional
Name of model/dataset. If not set - shows all available data.
show_only_latest : bool, optional
        If the storage contains different versions of the same dataset/model, this flag allows hiding outdated versions.
Affects only if `name` is None.
Returns
-------
dict
Detailed information about one or all models/datasets.
If name is specified, return full information about concrete dataset/model,
otherwise, return information about all available datasets/models.
Raises
------
Exception
If name that has been passed is incorrect.
Examples
--------
>>> import gensim.downloader as api
>>> api.info("text8") # retrieve information about text8 dataset
{u'checksum': u'68799af40b6bda07dfa47a32612e5364',
u'description': u'Cleaned small sample from wikipedia',
u'file_name': u'text8.gz',
u'parts': 1,
u'source': u'http://mattmahoney.net/dc/text8.zip'}
>>>
>>> api.info() # retrieve information about all available datasets and models
"""
information = json.loads(urlopen(DATA_LIST_URL).read().decode("utf-8"))
if name is not None:
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]
elif name in models:
return information['models'][name]
else:
raise ValueError("Incorrect model/corpus name")
if not show_only_latest:
return information
return {
"corpora": {name: data for (name, data) in information['corpora'].items() if data.get("latest", True)},
"models": {name: data for (name, data) in information['models'].items() if data.get("latest", True)}
}
def _get_checksum(name, part=None):
"""Retrieve the checksum of the model/dataset from gensim-data repository.
Parameters
----------
name : str
Dataset/model name.
part : int, optional
Number of part (for multipart data only).
Returns
-------
str
Retrieved checksum of dataset/model.
"""
information = info()
corpora = information['corpora']
models = information['models']
if part is None:
if name in corpora:
return information['corpora'][name]["checksum"]
elif name in models:
return information['models'][name]["checksum"]
else:
if name in corpora:
return information['corpora'][name]["checksum-{}".format(part)]
elif name in models:
return information['models'][name]["checksum-{}".format(part)]
def _get_parts(name):
"""Retrieve the number of parts in which dataset/model has been split.
Parameters
----------
name: str
Dataset/model name.
Returns
-------
int
Number of parts in which dataset/model has been split.
"""
information = info()
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]["parts"]
elif name in models:
return information['models'][name]["parts"]
def _download(name):
"""Download and extract the dataset/model.
Parameters
----------
name: str
Dataset/model name which has to be downloaded.
Raises
------
Exception
If md5sum on client and in repo are different.
"""
url_load_file = "{base}/{fname}/__init__.py".format(base=DOWNLOAD_BASE_URL, fname=name)
data_folder_dir = os.path.join(base_dir, name)
data_folder_dir_tmp = data_folder_dir + '_tmp'
tmp_dir = tempfile.mkdtemp()
init_path = os.path.join(tmp_dir, "__init__.py")
urllib.urlretrieve(url_load_file, init_path)
total_parts = _get_parts(name)
if total_parts > 1:
concatenated_folder_name = "{fname}.gz".format(fname=name)
concatenated_folder_dir = os.path.join(tmp_dir, concatenated_folder_name)
for part in range(0, total_parts):
url_data = "{base}/{fname}/{fname}.gz_0{part}".format(base=DOWNLOAD_BASE_URL, fname=name, part=part)
fname = "{f}.gz_0{p}".format(f=name, p=part)
dst_path = os.path.join(tmp_dir, fname)
urllib.urlretrieve(
url_data, dst_path,
reporthook=partial(_progress, part=part, total_parts=total_parts)
)
if _calculate_md5_checksum(dst_path) == _get_checksum(name, part):
sys.stdout.write("\n")
sys.stdout.flush()
logger.info("Part %s/%s downloaded", part + 1, total_parts)
else:
shutil.rmtree(tmp_dir)
raise Exception("Checksum comparison failed, try again")
with open(concatenated_folder_dir, 'wb') as wfp:
for part in range(0, total_parts):
part_path = os.path.join(tmp_dir, "{fname}.gz_0{part}".format(fname=name, part=part))
with open(part_path, "rb") as rfp:
shutil.copyfileobj(rfp, wfp)
os.remove(part_path)
else:
url_data = "{base}/{fname}/{fname}.gz".format(base=DOWNLOAD_BASE_URL, fname=name)
fname = "{fname}.gz".format(fname=name)
dst_path = os.path.join(tmp_dir, fname)
urllib.urlretrieve(url_data, dst_path, reporthook=_progress)
if _calculate_md5_checksum(dst_path) == _get_checksum(name):
sys.stdout.write("\n")
sys.stdout.flush()
logger.info("%s downloaded", name)
else:
shutil.rmtree(tmp_dir)
raise Exception("Checksum comparison failed, try again")
if os.path.exists(data_folder_dir_tmp):
os.remove(data_folder_dir_tmp)
shutil.move(tmp_dir, data_folder_dir_tmp)
os.rename(data_folder_dir_tmp, data_folder_dir)
def _get_filename(name):
"""Retrieve the filename of the dataset/model.
Parameters
----------
name: str
Name of dataset/model.
Returns
-------
str:
Filename of the dataset/model.
"""
information = info()
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]["file_name"]
elif name in models:
return information['models'][name]["file_name"]
def load(name, return_path=False):
"""Download (if needed) dataset/model and load it to memory (unless `return_path` is set).
Parameters
----------
name: str
Name of the model/dataset.
return_path: bool, optional
If True, return full path to file, otherwise, return loaded model / iterable dataset.
Returns
-------
Model
Requested model, if `name` is model and `return_path` == False.
Dataset (iterable)
Requested dataset, if `name` is dataset and `return_path` == False.
str
Path to file with dataset / model, only when `return_path` == True.
Raises
------
Exception
Raised if `name` is incorrect.
Examples
--------
Model example:
>>> import gensim.downloader as api
>>>
>>> model = api.load("glove-twitter-25") # load glove vectors
    >>> model.most_similar("cat")  # show words similar to 'cat'
Dataset example:
>>> import gensim.downloader as api
>>>
>>> wiki = api.load("wiki-en") # load extracted Wikipedia dump, around 6 Gb
    >>> for article in wiki:  # iterate over all wiki articles
>>> ...
Download only example
>>> import gensim.downloader as api
>>>
>>> print(api.load("wiki-en", return_path=True)) # output: /home/user/gensim-data/wiki-en/wiki-en.gz
"""
_create_base_dir()
file_name = _get_filename(name)
if file_name is None:
raise ValueError("Incorrect model/corpus name")
folder_dir = os.path.join(base_dir, name)
path = os.path.join(folder_dir, file_name)
if not os.path.exists(folder_dir):
_download(name)
if return_path:
return path
else:
sys.path.insert(0, base_dir)
module = __import__(name)
return module.load_data()
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(name)s : %(levelname)s : %(message)s', stream=sys.stdout, level=logging.INFO
)
parser = argparse.ArgumentParser(
description="Gensim console API",
usage="python -m gensim.api.downloader [-h] [-d data_name | -i data_name | -c]"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-d", "--download", metavar="data_name", nargs=1,
help="To download a corpus/model : python -m gensim.downloader -d <dataname>"
)
full_information = 1
group.add_argument(
"-i", "--info", metavar="data_name", nargs='?', const=full_information,
help="To get information about a corpus/model : python -m gensim.downloader -i <dataname>"
)
args = parser.parse_args()
if args.download is not None:
data_path = load(args.download[0], return_path=True)
logger.info("Data has been installed and data path is %s", data_path)
elif args.info is not None:
output = info() if (args.info == full_information) else info(name=args.info)
print(json.dumps(output, indent=4))
|
lgpl-2.1
| -6,188,187,554,405,224,000
| 30.53125
| 112
| 0.606116
| false
| 3.829222
| false
| false
| false
|
rycus86/docker-pygen
|
tests/test_generator.py
|
1
|
4910
|
import pygen
from unittest_helper import BaseDockerTestCase
class GeneratorTest(BaseDockerTestCase):
app = None
def tearDown(self):
super(GeneratorTest, self).tearDown()
if hasattr(self, 'app') and self.app:
self.app.api.close()
def test_generate(self):
test_container = self.start_container(environment=['GENERATOR=pygen'])
self.app = pygen.PyGen(template="""#
{% for container in containers %}
running: {{ container.name }} ID={{ container.short_id }}
{% for key, value in container.env.items() %}
env: {{ key }}=>{{ value }}
{% endfor %}
{% endfor %}""")
content = self.app.generate()
self.assertIn('running: %s' % test_container.name, content)
self.assertIn('ID=%s' % test_container.short_id, content)
self.assertIn('env: GENERATOR=>pygen', content)
def test_generate_with_groups(self):
self.start_container(environment=['GENERATOR=pygen'],
labels={'instance': '001',
'application': 'web'})
self.start_container(environment=['GENERATOR=pygen'],
labels={'instance': '002',
'application': 'web'})
self.start_container(environment=['GENERATOR=pygen'],
labels={'instance': '003',
'application': 'db'})
self.app = pygen.PyGen(template="""#
{% for key, containers in containers|groupby('labels.application') %}
group: {{ key }}
{% for container in containers %}
instance: {{ container.labels.instance }}
{% endfor %}
{% endfor %}""")
content = self.app.generate()
self.assertIn('group: web', content)
self.assertIn('group: db', content)
for num in range(1, 4):
self.assertIn('instance: %03d' % num, content)
def test_nginx_template(self):
self.start_container(name='pygen-test-nginx-1', labels={'virtual-host': 'test.site.com'}, ports={8080: None})
self.start_container(name='pygen-test-nginx-2', labels={'virtual-host': 'test.site.com'}, ports={8080: None})
self.start_container(name='pygen-test-nginx-3', labels={'virtual-host': 'www.site.com'}, ports={8080: None})
self.start_container(name='pygen-test-nginx-4', labels={'virtual-host': 'api.site.com',
'context-path': '/rest'}, ports={5000: None})
self.start_container(name='pygen-test-nginx-5', labels={'virtual-host': 'api.site.com',
'context-path': '/stream'}, ports={5000: None})
self.start_container(name='pygen-test-nginx-6', labels={'virtual-host': 'api.site.com',
'context-path': '/no-port-exposed'})
self.start_container(name='pygen-test-nginx-7', labels={'context-path': '/no-virtual-host'}, ports={9001: None})
self.app = pygen.PyGen(template=self.relative('templates/nginx.example'))
content = self.app.generate()
# pygen-test-nginx-1 : test.site.com/ 8080
self.assertIn('# pygen-test-nginx-1', content)
# pygen-test-nginx-2 : test.site.com/ 8080
self.assertIn('# pygen-test-nginx-2', content)
# pygen-test-nginx-3 : www.site.com/ 8080
self.assertIn('# pygen-test-nginx-3', content)
# pygen-test-nginx-4 : api.site.com/rest 5000
self.assertIn('# pygen-test-nginx-4', content)
# pygen-test-nginx-5 : api.site.com/stream 5000
self.assertIn('# pygen-test-nginx-5', content)
# pygen-test-nginx-6 : - /no-port-exposed
self.assertNotIn('pygen-test-nginx-6', content)
# pygen-test-nginx-7 : - /no-virtual-host 9001
self.assertNotIn('pygen-test-nginx-7', content)
for upstream in ('test.site.com___', 'www.site.com___', 'api.site.com___rest', 'api.site.com___stream'):
self.assertIn('upstream %s ' % upstream, content)
self.assertIn('proxy_pass http://%s;' % upstream, content)
self.assertNotIn('upstream api.site.com___ ', content)
self.assertIn('location / ', content)
self.assertIn('location /rest ', content)
self.assertIn('location /stream ', content)
for num in range(1, 6):
container = self.docker_client.containers.get('pygen-test-nginx-%d' % num)
ip_address = next(iter(container.attrs['NetworkSettings']['Networks'].values())).get('IPAddress')
port = next(iter(container.attrs['Config'].get('ExposedPorts', dict()).keys())).split('/')[0]
self.assertIn('server %s:%s;' % (ip_address, port), content)
|
mit
| -8,002,882,736,790,479,000
| 45.761905
| 120
| 0.556823
| false
| 3.953301
| true
| false
| false
|
marcosfede/algorithms
|
tree/binary_tree/path_sum/path_sum/path_sum2.py
|
1
|
1375
|
def path_sum(root, sum):
if not root:
return []
res = []
DFS(root, sum, [], res)
return res
def DFS(root, sum, ls, res):
if not root.left and not root.right and root.val == sum:
ls.append(root.val)
res.append(ls)
if root.left:
DFS(root.left, sum - root.val, ls + [root.val], res)
if root.right:
DFS(root.right, sum - root.val, ls + [root.val], res)
# DFS with stack
def path_sum2(root, s):
if not root:
return []
res = []
stack = [(root, [root.val])]
while stack:
node, ls = stack.pop()
if not node.left and not node.right and sum(ls) == s:
res.append(ls)
if node.left:
stack.append((node.left, ls + [node.left.val]))
if node.right:
stack.append((node.right, ls + [node.right.val]))
return res
# BFS with queue
def path_sum3(root, sum):
if not root:
return []
res = []
queue = [(root, root.val, [root.val])]
while queue:
node, val, ls = queue.pop(0) # popleft
if not node.left and not node.right and val == sum:
res.append(ls)
if node.left:
queue.append((node.left, val + node.left.val, ls + [node.left.val]))
if node.right:
queue.append((node.right, val + node.right.val, ls + [node.right.val]))
return res
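# A minimal, self-contained usage sketch (added for illustration; the module
# assumes a binary-tree node type with `val`, `left` and `right` attributes
# but does not define one):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right

    demo = TreeNode(5,
                    TreeNode(4, TreeNode(11, TreeNode(7), TreeNode(2))),
                    TreeNode(8, TreeNode(13), TreeNode(4, right=TreeNode(1))))
    # Each variant finds the single root-to-leaf path summing to 22.
    print(path_sum(demo, 22))   # [[5, 4, 11, 2]]
    print(path_sum2(demo, 22))  # [[5, 4, 11, 2]]
    print(path_sum3(demo, 22))  # [[5, 4, 11, 2]]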
|
gpl-3.0
| -5,353,832,707,150,959,000
| 26.5
| 83
| 0.537455
| false
| 3.205128
| false
| false
| false
|
alerta/python-alerta
|
alertaclient/commands/cmd_heartbeats.py
|
1
|
5322
|
import json
import click
from tabulate import tabulate
from alertaclient.models.heartbeat import Heartbeat
from alertaclient.utils import origin
@click.command('heartbeats', short_help='List heartbeats')
@click.option('--alert', is_flag=True, help='Alert on stale or slow heartbeats')
@click.option('--severity', '-s', metavar='SEVERITY', default='major', help='Severity for heartbeat alerts')
@click.option('--timeout', metavar='SECONDS', type=int, help='Seconds before stale heartbeat alerts will be expired')
@click.option('--purge', is_flag=True, help='Delete all stale heartbeats')
@click.pass_obj
def cli(obj, alert, severity, timeout, purge):
"""List heartbeats."""
client = obj['client']
try:
default_normal_severity = obj['alarm_model']['defaults']['normal_severity']
except KeyError:
default_normal_severity = 'normal'
if severity in ['normal', 'ok', 'cleared']:
raise click.UsageError('Must be a non-normal severity. "{}" is one of {}'.format(
severity, ', '.join(['normal', 'ok', 'cleared']))
)
if severity not in obj['alarm_model']['severity'].keys():
raise click.UsageError('Must be a valid severity. "{}" is not one of {}'.format(
severity, ', '.join(obj['alarm_model']['severity'].keys()))
)
if obj['output'] == 'json':
r = client.http.get('/heartbeats')
heartbeats = [Heartbeat.parse(hb) for hb in r['heartbeats']]
click.echo(json.dumps(r['heartbeats'], sort_keys=True, indent=4, ensure_ascii=False))
else:
timezone = obj['timezone']
headers = {
'id': 'ID', 'origin': 'ORIGIN', 'customer': 'CUSTOMER', 'tags': 'TAGS', 'attributes': 'ATTRIBUTES',
'createTime': 'CREATED', 'receiveTime': 'RECEIVED', 'since': 'SINCE', 'timeout': 'TIMEOUT',
'latency': 'LATENCY', 'maxLatency': 'MAX LATENCY', 'status': 'STATUS'
}
heartbeats = client.get_heartbeats()
click.echo(tabulate([h.tabular(timezone) for h in heartbeats], headers=headers, tablefmt=obj['output']))
not_ok = [hb for hb in heartbeats if hb.status != 'ok']
if purge:
with click.progressbar(not_ok, label='Purging {} heartbeats'.format(len(not_ok))) as bar:
for b in bar:
client.delete_heartbeat(b.id)
if alert:
with click.progressbar(heartbeats, label='Alerting {} heartbeats'.format(len(heartbeats))) as bar:
for b in bar:
want_environment = b.attributes.pop('environment', 'Production')
want_severity = b.attributes.pop('severity', severity)
want_service = b.attributes.pop('service', ['Alerta'])
want_group = b.attributes.pop('group', 'System')
if b.status == 'expired': # aka. "stale"
client.send_alert(
resource=b.origin,
event='HeartbeatFail',
environment=want_environment,
severity=want_severity,
correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'],
service=want_service,
group=want_group,
value='{}'.format(b.since),
text='Heartbeat not received in {} seconds'.format(b.timeout),
tags=b.tags,
attributes=b.attributes,
origin=origin(),
type='heartbeatAlert',
timeout=timeout,
customer=b.customer
)
elif b.status == 'slow':
client.send_alert(
resource=b.origin,
event='HeartbeatSlow',
environment=want_environment,
severity=want_severity,
correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'],
service=want_service,
group=want_group,
value='{}ms'.format(b.latency),
text='Heartbeat took more than {}ms to be processed'.format(b.max_latency),
tags=b.tags,
attributes=b.attributes,
origin=origin(),
type='heartbeatAlert',
timeout=timeout,
customer=b.customer
)
else:
client.send_alert(
resource=b.origin,
event='HeartbeatOK',
environment=want_environment,
severity=default_normal_severity,
correlate=['HeartbeatFail', 'HeartbeatSlow', 'HeartbeatOK'],
service=want_service,
group=want_group,
value='',
text='Heartbeat OK',
tags=b.tags,
attributes=b.attributes,
origin=origin(),
type='heartbeatAlert',
timeout=timeout,
customer=b.customer
)
|
mit
| -4,965,624,064,348,664,000
| 44.487179
| 117
| 0.511086
| false
| 4.479798
| false
| false
| false
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamtvsupertuga/lib/resources/lib/sources/en_debrid/rlsbb.py
|
1
|
7811
|
# -*- coding: utf-8 -*-
import re,urllib,urlparse
import traceback
from resources.lib.modules import log_utils, source_utils
from resources.lib.modules import client, rd_check
from resources.lib.modules import debrid, control
from resources.lib.sources import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['rlsbb.ru']
self.base_link = 'http://rlsbb.ru'
self.search_base_link = 'http://search.rlsbb.ru'
self.search_cookie = 'serach_mode=rlsbb'
self.search_link = '/lib/search526049.php?phrase=%s&pindex=1&content=true'
self.headers = {'User-Agent': client.agent()}
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
if debrid.status() is False: return
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
if debrid.status() is False: return
try:
if url is None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
premDate = ''
query = '%s %s S%02dE%02d' % (
data['tvshowtitle'], data['year'], int(data['season']), int(data['episode']))
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
query = query.replace("&", "and")
query = query.replace(" ", " ")
query = query.replace(" ", "-")
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
url = "http://rlsbb.ru/" + query
if 'tvshowtitle' not in data: url = url + "-1080p"
r = cfscrape.get(url, headers=self.headers).content
if r is None and 'tvshowtitle' in data:
season = re.search('S(.*?)E', hdlr)
season = season.group(1)
query = title
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
query = query + "-S" + season
query = query.replace("&", "and")
query = query.replace(" ", " ")
query = query.replace(" ", "-")
url = "http://rlsbb.ru/" + query
r = cfscrape.get(url, headers=self.headers).content
for loopCount in range(0, 2):
if loopCount == 1 or (r is None and 'tvshowtitle' in data):
premDate = re.sub('[ \.]','-',data['premiered'])
query = re.sub('[\\\\:;*?"<>|/\-\']', '', data['tvshowtitle'])
query = query.replace("&", " and ").replace(" ", " ").replace(" ", "-")
query = query + "-" + premDate
url = "http://rlsbb.ru/" + query
url = url.replace('The-Late-Show-with-Stephen-Colbert','Stephen-Colbert')
r = cfscrape.get(url, headers=self.headers).content
posts = client.parseDOM(r, "div", attrs={"class": "content"})
hostDict = hostprDict + hostDict
if control.setting('deb.rd_check') == 'true':
limit = 25
items = []
for index, post in enumerate(posts):
if index == limit:
break
try:
u = client.parseDOM(post, 'a', ret='href')
for i in u:
try:
name = str(i)
if hdlr in name.upper():
items.append(name)
elif len(premDate) > 0 and premDate in name.replace(".", "-"):
items.append(name)
except:
pass
except:
pass
if len(items) > 0:
break
else:
items = []
for post in posts:
try:
u = client.parseDOM(post, 'a', ret='href')
for i in u:
try:
name = str(i)
if hdlr in name.upper():
items.append(name)
elif len(premDate) > 0 and premDate in name.replace(".", "-"):
items.append(name)
except:
pass
except:
pass
if len(items) > 0:
break
seen_urls = set()
for item in items:
try:
info = []
url = str(item)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
if url in seen_urls:
continue
seen_urls.add(url)
host = url.replace("\\", "")
host2 = host.strip('"')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(host2.strip().lower()).netloc)[0]
if host not in hostDict:
raise Exception()
if any(x in host2 for x in ['.rar', '.zip', '.iso']):
continue
quality, info = source_utils.get_release_quality(host2, host2)
info = ' | '.join(info)
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if control.setting('deb.rd_check') == 'true':
check = rd_check.rd_deb_check(host2)
if check:
info = 'RD Checked' + ' | ' + info
sources.append(
{'source': host, 'quality': quality, 'language': 'en', 'url': check,
'info': info, 'direct': False, 'debridonly': True})
else:
sources.append(
{'source': host, 'quality': quality, 'language': 'en', 'url': host2,
'info': info, 'direct': False, 'debridonly': True})
except:
pass
check = [i for i in sources if not i['quality'] == 'CAM']
if check:
sources = check
return sources
except Exception:
failure = traceback.format_exc()
log_utils.log('---Rlsbb Testing - Exception: \n' + str(failure))
return sources
def resolve(self, url):
return url
|
gpl-2.0
| -5,736,061,815,632,537,000
| 38.852041
| 109
| 0.416592
| false
| 4.45579
| false
| false
| false
|
Dubrzr/golb
|
users/models.py
|
1
|
1432
|
from django.contrib.auth.base_user import BaseUserManager, AbstractBaseUser
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, **user_data):
user = self.model()
print(dict(user_data))
user.set_password(user_data.pop('password'))
for key, value in user_data.items():
setattr(user, key, value)
user.save(using=self.db)
return user
def create_superuser(self, **user_data):
user = self.create_user(**user_data)
user.is_admin = True
user.is_superuser = True
user.is_staff = True
user.is_active = True
user.save(using=self.db)
return user
class User(AbstractBaseUser):
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['first_name', 'last_name', 'email', 'is_admin']
first_name = models.CharField(max_length=254)
last_name = models.CharField(max_length=254)
username = models.CharField(max_length=254, unique=True)
email = models.EmailField(max_length=254, unique=True)
date_joined = models.DateTimeField(auto_now=True)
is_admin = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if not self.pk:
self.is_superuser = self.is_admin
self.is_staff = self.is_admin
super().save(*args, **kwargs)
else:
super().save(*args, **kwargs)
|
mit
| 1,415,178,026,668,730,400
| 31.545455
| 75
| 0.627793
| false
| 3.653061
| false
| false
| false
|
lino-framework/xl
|
lino_xl/lib/products/fixtures/furniture.py
|
1
|
4427
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.utils.instantiator import Instantiator
from lino_xl.lib.products.choicelists import ProductTypes
from lino.api import dd, _
def objects():
productcat = Instantiator('products.Category').build
product = Instantiator('products.Product', "sales_price category").build
furniture = productcat(
id=1, product_type=ProductTypes.default, **dd.babel_values(
'name', _("Furniture"), et="Mööbel", de="Möbel", fr="Meubles"))
yield furniture
# print "foo", furniture.id, furniture
hosting = productcat(
id=2, product_type=ProductTypes.default, **dd.babel_values(
'name', _("Website Hosting"),
et="Veebimajutus",
de="Website-Hosting",
fr="Hébergement de sites Internet"))
yield hosting
other = productcat(id=3, **dd.str2kw('name', _("Other")))
yield other
kw = dd.babel_values('name', _("Wooden table"),
et=u"Laud puidust",
de="Tisch aus Holz",
fr=u"Table en bois")
kw.update(dd.babel_values(
'description', _("""\
This table is made of pure wood.
It has **four legs**.
Designed to fit perfectly with **up to 6 wooden chairs**.
Product of the year 2008."""),
et="""\
See laud on tehtud ehtsast puust.
Sellel on **neli jalga**.
Disainitud sobida kokku **kuni 6 puidust tooliga**.
Product of the year 2008.""",
de="""\
Dieser Tisch ist aus echtem Holz.
Er hat **vier Beine**.
Passt perfekt zusammen mit **bis zu 6 Stühlen aus Holz**.
Produkt des Jahres 2008.""",
fr="""\
Cette table est en bois authentique.
Elle a **quatre jambes**.
Conçue pour mettre jusqu'à **6 chaises en bois**.
Produit de l'année 2008.""",
))
yield product("199.99", 1, **kw)
yield product("99.99", 1, **dd.babel_values('name', _("Wooden chair"),
et="Tool puidust",
de="Stuhl aus Holz",
fr="Chaise en bois"))
yield product("129.99", 1, **dd.babel_values('name', _("Metal table"),
et="Laud metallist",
de="Tisch aus Metall",
fr="Table en métal"))
yield product("79.99", 1, **dd.babel_values('name', _("Metal chair"),
et="Tool metallist",
de="Stuhl aus Metall",
fr="Chaise en métal"))
hosting = product("3.99", 2,
**dd.babel_values('name', _("Website hosting 1MB/month"),
et="Majutus 1MB/s",
de="Website-Hosting 1MB/Monat",
fr="Hébergement 1MB/mois"))
yield hosting
yield product("30.00", 2,
**dd.babel_values('name', _("IT consultation & maintenance"),
et=u"IKT konsultatsioonid & hooldustööd",
de=u"EDV Konsultierung & Unterhaltsarbeiten",
fr=u"ICT Consultation & maintenance"))
yield product("35.00", 2, **dd.babel_values(
'name', _("Server software installation, configuration and administration"),
et="Serveritarkvara installeerimine, seadistamine ja administreerimine",
de="Server software installation, configuration and administration",
fr="Server software installation, configuration and administration"))
yield product("40.00", 2, **dd.babel_values(
'name', _("Programming"),
et="Programmeerimistööd",
de="Programmierung",
fr="Programmation"))
yield product("25.00", 2, **dd.babel_values(
'name', _("Image processing and website content maintenance"),
et="Pilditöötlus ja kodulehtede sisuhaldustööd",
de="Bildbearbeitung und Unterhalt Website",
fr="Traitement d'images et maintenance site existant"))
yield product("29.90", 3, **dd.str2kw('name', _("Book"), vat_class="reduced"))
yield product("1.40", 3, **dd.str2kw('name', _("Stamp"), vat_class="exempt"))
|
bsd-2-clause
| -2,917,680,532,478,270,000
| 42.643564
| 84
| 0.54696
| false
| 3.56346
| false
| false
| false
|
transt/cloud-init-0.7.5
|
cloudinit/util.py
|
1
|
58202
|
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=C0302
from StringIO import StringIO
import contextlib
import copy as obj_copy
import ctypes
import errno
import glob
import grp
import gzip
import hashlib
import json
import os
import os.path
import platform
import pwd
import random
import re
import shutil
import socket
import stat
import string # pylint: disable=W0402
import subprocess
import sys
import tempfile
import time
import urlparse
import yaml
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import type_utils
from cloudinit import url_helper
from cloudinit import version
from cloudinit.settings import (CFG_BUILTIN)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
os.sep: '_',
}
FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
# Helper utils to see if running in a container
CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
class ProcessExecutionError(IOError):
MESSAGE_TMPL = ('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Reason: %(reason)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r')
def __init__(self, stdout=None, stderr=None,
exit_code=None, cmd=None,
description=None, reason=None):
if not cmd:
self.cmd = '-'
else:
self.cmd = cmd
if not description:
self.description = 'Unexpected error while running command.'
else:
self.description = description
if not isinstance(exit_code, (long, int)):
self.exit_code = '-'
else:
self.exit_code = exit_code
if not stderr:
self.stderr = ''
else:
self.stderr = stderr
if not stdout:
self.stdout = ''
else:
self.stdout = stdout
if reason:
self.reason = reason
else:
self.reason = '-'
message = self.MESSAGE_TMPL % {
'description': self.description,
'cmd': self.cmd,
'exit_code': self.exit_code,
'stdout': self.stdout,
'stderr': self.stderr,
'reason': self.reason,
}
IOError.__init__(self, message)
class SeLinuxGuard(object):
def __init__(self, path, recursive=False):
# Late import since it might not always
# be possible to use this
try:
self.selinux = importer.import_module('selinux')
except ImportError:
self.selinux = None
self.path = path
self.recursive = recursive
def __enter__(self):
if self.selinux and self.selinux.is_selinux_enabled():
return True
else:
return False
def __exit__(self, excp_type, excp_value, excp_traceback):
if self.selinux and self.selinux.is_selinux_enabled():
path = os.path.realpath(os.path.expanduser(self.path))
# path should be a string, not unicode
path = str(path)
do_restore = False
try:
# See if even worth restoring??
stats = os.lstat(path)
if stat.ST_MODE in stats:
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
do_restore = True
except OSError:
pass
if do_restore:
LOG.debug("Restoring selinux mode for %s (recursive=%s)",
path, self.recursive)
self.selinux.restorecon(path, recursive=self.recursive)
class MountFailedError(Exception):
pass
class DecompressionError(Exception):
pass
def ExtendedTemporaryFile(**kwargs):
fh = tempfile.NamedTemporaryFile(**kwargs)
# Replace its unlink with a quiet version
# that does not raise errors when the
# file to unlink has been unlinked elsewhere..
LOG.debug("Created temporary file %s", fh.name)
fh.unlink = del_file
# Add a new method that will unlink
# right 'now' but still lets the exit
# method attempt to remove it (which will
# not throw due to our del file being quiet
# about files that are not there)
def unlink_now():
fh.unlink(fh.name)
setattr(fh, 'unlink_now', unlink_now)
return fh
def fork_cb(child_cb, *args):
fid = os.fork()
if fid == 0:
try:
child_cb(*args)
os._exit(0) # pylint: disable=W0212
except:
logexc(LOG, "Failed forking and calling callback %s",
type_utils.obj_name(child_cb))
os._exit(1) # pylint: disable=W0212
else:
LOG.debug("Forked child %s who will run callback %s",
fid, type_utils.obj_name(child_cb))
def is_true(val, addons=None):
if isinstance(val, (bool)):
return val is True
check_set = ['true', '1', 'on', 'yes']
if addons:
check_set = check_set + addons
if str(val).lower().strip() in check_set:
return True
return False
def is_false(val, addons=None):
if isinstance(val, (bool)):
return val is False
check_set = ['off', '0', 'no', 'false']
if addons:
check_set = check_set + addons
if str(val).lower().strip() in check_set:
return True
return False
def translate_bool(val, addons=None):
if not val:
# This handles empty lists and false and
# other things that python believes are false
return False
# If its already a boolean skip
if isinstance(val, (bool)):
return val
return is_true(val, addons)
def rand_str(strlen=32, select_from=None):
if not select_from:
select_from = string.letters + string.digits
return "".join([random.choice(select_from) for _x in range(0, strlen)])
def read_conf(fname):
try:
return load_yaml(load_file(fname), default={})
except IOError as e:
if e.errno == errno.ENOENT:
return {}
else:
raise
# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
return sorted(uniq_merge(*lists))
# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
if isinstance(a_list, (str, basestring)):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
combined_list.extend(a_list)
return uniq_list(combined_list)
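# Illustrative example of the merge/uniq behavior described above (comment
# only, not part of the original module; the inputs shown are assumptions):
#   >>> uniq_merge("a,,b", ["b", "c"])
#   ['a', 'b', 'c']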
def clean_filename(fn):
for (k, v) in FN_REPLACEMENTS.iteritems():
fn = fn.replace(k, v)
removals = []
for k in fn:
if k not in FN_ALLOWED:
removals.append(k)
for k in removals:
fn = fn.replace(k, '')
fn = fn.strip()
return fn
def decomp_gzip(data, quiet=True):
try:
buf = StringIO(str(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
return gh.read()
except Exception as e:
if quiet:
return data
else:
raise DecompressionError(str(e))
def extract_usergroup(ug_pair):
if not ug_pair:
return (None, None)
ug_parted = ug_pair.split(':', 1)
u = ug_parted[0].strip()
if len(ug_parted) == 2:
g = ug_parted[1].strip()
else:
g = None
if not u or u == "-1" or u.lower() == "none":
u = None
if not g or g == "-1" or g.lower() == "none":
g = None
return (u, g)
def find_modules(root_dir):
entries = dict()
for fname in glob.glob(os.path.join(root_dir, "*.py")):
if not os.path.isfile(fname):
continue
modname = os.path.basename(fname)[0:-3]
modname = modname.strip()
if modname and modname.find(".") == -1:
entries[fname] = modname
return entries
def multi_log(text, console=True, stderr=True,
log=None, log_level=logging.DEBUG):
if stderr:
sys.stderr.write(text)
if console:
conpath = "/dev/console"
if os.path.exists(conpath):
with open(conpath, 'wb') as wfh:
wfh.write(text)
wfh.flush()
else:
# A container may lack /dev/console (arguably a container bug). If
# it does not exist, then write output to stdout. this will result
# in duplicate stderr and stdout messages if stderr was True.
#
# even though upstart or systemd might have set up output to go to
# /dev/console, the user may have configured elsewhere via
# cloud-config 'output'. If there is /dev/console, messages will
# still get there.
sys.stdout.write(text)
if log:
if text[-1] == "\n":
log.log(log_level, text[:-1])
else:
log.log(log_level, text)
def load_json(text, root_types=(dict,)):
decoded = json.loads(text)
if not isinstance(decoded, tuple(root_types)):
expected_types = ", ".join([str(t) for t in root_types])
raise TypeError("(%s) root types expected, got %s instead"
% (expected_types, type(decoded)))
return decoded
def is_ipv4(instr):
"""determine if input string is a ipv4 address. return boolean."""
toks = instr.split('.')
if len(toks) != 4:
return False
try:
toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
except:
return False
return len(toks) == 4
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
return translate_bool(yobj[key])
def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
if not isinstance(val, (str, basestring)):
val = str(val)
return val
def system_info():
return {
'platform': platform.platform(),
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
'dist': platform.linux_distribution(),
}
def get_cfg_option_list(yobj, key, default=None):
"""
Gets the C{key} config option from C{yobj} as a list of strings. If the
key is present as a single string it will be returned as a list with one
string arg.
@param yobj: The configuration object.
@param key: The configuration key to get.
@param default: The default to return if key is not found.
@return: The configuration option as a list of strings or default if key
is not found.
"""
if not key in yobj:
return default
if yobj[key] is None:
return []
val = yobj[key]
if isinstance(val, (list)):
cval = [v for v in val]
return cval
if not isinstance(val, (basestring)):
val = str(val)
return [val]
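# Illustrative examples for get_cfg_option_list (comment only; the config
# values shown are assumptions):
#   >>> get_cfg_option_list({'packages': 'vim'}, 'packages')
#   ['vim']
#   >>> get_cfg_option_list({'packages': None}, 'packages')
#   []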
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
cur = yobj
for tok in keyp:
if tok not in cur:
return default
cur = cur[tok]
return cur
def fixup_output(cfg, mode):
(outfmt, errfmt) = get_output_cfg(cfg, mode)
redirect_output(outfmt, errfmt)
return (outfmt, errfmt)
# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
# > FILEPATH
# >> FILEPATH
# | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
# shell escape is required.
#
# if _CLOUD_INIT_SAVE_STDOUT is set in the environment to a non-empty and true
# value then output will not be redirected (useful for debugging).
#
def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
return
if not o_out:
o_out = sys.stdout
if not o_err:
o_err = sys.stderr
if outfmt:
LOG.debug("Redirecting %s to %s", o_out, outfmt)
(mode, arg) = outfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
if o_out:
os.dup2(new_fp.fileno(), o_out.fileno())
if errfmt == outfmt:
LOG.debug("Redirecting %s to %s", o_err, outfmt)
os.dup2(new_fp.fileno(), o_err.fileno())
return
if errfmt:
LOG.debug("Redirecting %s to %s", o_err, errfmt)
(mode, arg) = errfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
if mode == ">":
owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
new_fp = proc.stdin # pylint: disable=E1101
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
if o_err:
os.dup2(new_fp.fileno(), o_err.fileno())
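# Illustrative sketch of the format strings accepted above (comment only;
# the paths are assumptions). Passing the same '|' format for both streams
# sends stdout and stderr through a single pipe:
#   redirect_output("| tee -a /var/log/cloud-init-output.log",
#                   "| tee -a /var/log/cloud-init-output.log")
# while "> /tmp/out.log" truncates and ">> /tmp/out.log" appends to a file.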
def make_url(scheme, host, port=None,
path='', params='', query='', fragment=''):
pieces = []
pieces.append(scheme or '')
netloc = ''
if host:
netloc = str(host)
if port is not None:
netloc += ":" + "%s" % (port)
pieces.append(netloc or '')
pieces.append(path or '')
pieces.append(params or '')
pieces.append(query or '')
pieces.append(fragment or '')
return urlparse.urlunparse(pieces)
def mergemanydict(srcs, reverse=False):
if reverse:
srcs = reversed(srcs)
merged_cfg = {}
for cfg in srcs:
if cfg:
# Figure out which mergers to apply...
mergers_to_apply = mergers.dict_extract_mergers(cfg)
if not mergers_to_apply:
mergers_to_apply = mergers.default_mergers()
merger = mergers.construct(mergers_to_apply)
merged_cfg = merger.merge(merged_cfg, cfg)
return merged_cfg
@contextlib.contextmanager
def chdir(ndir):
curr = os.getcwd()
try:
os.chdir(ndir)
yield ndir
finally:
os.chdir(curr)
@contextlib.contextmanager
def umask(n_msk):
old = os.umask(n_msk)
try:
yield old
finally:
os.umask(old)
@contextlib.contextmanager
def tempdir(**kwargs):
# This seems like it was only added in python 3.2
    # Make it since it's useful...
# See: http://bugs.python.org/file12970/tempdir.patch
tdir = tempfile.mkdtemp(**kwargs)
try:
yield tdir
finally:
del_dir(tdir)
def center(text, fill, max_len):
return '{0:{fill}{align}{size}}'.format(text, fill=fill,
align="^", size=max_len)
def del_dir(path):
LOG.debug("Recursively deleting %s", path)
shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True, exe_prefix=None):
if skip_no_exist and not os.path.isdir(dirp):
return
failed = []
attempted = []
if exe_prefix is None:
prefix = []
elif isinstance(exe_prefix, str):
prefix = [str(exe_prefix)]
elif isinstance(exe_prefix, list):
prefix = exe_prefix
else:
raise TypeError("exe_prefix must be None, str, or list")
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
attempted.append(exe_path)
try:
subp(prefix + [exe_path], capture=False)
except ProcessExecutionError as e:
logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
failed.append(e)
if failed and attempted:
raise RuntimeError('Runparts: %s failures in %s attempted commands'
% (len(failed), len(attempted)))
# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries
def read_optional_seed(fill, base="", ext="", timeout=5):
try:
(md, ud) = read_seeded(base, ext, timeout)
fill['user-data'] = ud
fill['meta-data'] = md
return True
except url_helper.UrlError as e:
if e.code == url_helper.NOT_FOUND:
return False
raise
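# Illustrative usage sketch (comment only; the seed path is an assumption):
#   fill = {}
#   if read_optional_seed(fill, base="file:///var/lib/cloud/seed/nocloud/"):
#       # fill now holds 'meta-data' (parsed yaml) and 'user-data' (raw string)
#       pass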
def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
ssl_cert_paths = [
'/opt/freeware/var/lib/cloud/data/ssl',
'/opt/freeware/var/lib/cloud/instance/data/ssl',
]
if paths:
ssl_cert_paths.extend([
os.path.join(paths.get_ipath_cur('data'), 'ssl'),
os.path.join(paths.get_cpath('data'), 'ssl'),
])
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'cert.pem')):
cert_file = os.path.join(d, 'cert.pem')
break
key_file = None
for d in ssl_cert_paths:
if os.path.isfile(os.path.join(d, 'key.pem')):
key_file = os.path.join(d, 'key.pem')
break
if cert_file and key_file:
ssl_details['cert_file'] = cert_file
ssl_details['key_file'] = key_file
elif cert_file:
ssl_details['cert_file'] = cert_file
return ssl_details
def read_file_or_url(url, timeout=5, retries=10,
headers=None, data=None, sec_between=1, ssl_details=None,
headers_cb=None, exception_cb=None):
url = url.lstrip()
if url.startswith("/"):
url = "file://%s" % url
if url.lower().startswith("file://"):
if data:
LOG.warn("Unable to post data to file resource %s", url)
file_path = url[len("file://"):]
try:
contents = load_file(file_path)
except IOError as e:
code = e.errno
if e.errno == errno.ENOENT:
code = url_helper.NOT_FOUND
raise url_helper.UrlError(cause=e, code=code, headers=None)
return url_helper.FileResponse(file_path, contents=contents)
else:
return url_helper.readurl(url,
timeout=timeout,
retries=retries,
headers=headers,
headers_cb=headers_cb,
data=data,
sec_between=sec_between,
ssl_details=ssl_details,
exception_cb=exception_cb)
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
try:
blob = str(blob)
LOG.debug(("Attempting to load yaml from string "
"of length %s with allowed root types %s"),
len(blob), allowed)
converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
            # Yes this will just be caught, but that's ok for now...
raise TypeError(("Yaml load allows %s root types,"
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
except (yaml.YAMLError, TypeError, ValueError):
if len(blob) == 0:
LOG.debug("load_yaml given empty string, returning default")
else:
logexc(LOG, "Failed loading yaml blob")
return loaded
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.startswith("/"):
base = "file://%s" % base
# default retries for file is 0. for network is 10
if base.startswith("file://"):
retries = file_retries
if base.find("%s") >= 0:
ud_url = base % ("user-data" + ext)
md_url = base % ("meta-data" + ext)
else:
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
md = None
if md_resp.ok():
md_str = str(md_resp)
md = load_yaml(md_str, default={})
ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
ud = None
if ud_resp.ok():
ud_str = str(ud_resp)
ud = ud_str
return (md, ud)
def read_conf_d(confd):
    # Get reverse sorted list (so that later named files get merge priority)
confs = sorted(os.listdir(confd), reverse=True)
# Remove anything not ending in '.cfg'
confs = [f for f in confs if f.endswith(".cfg")]
# Remove anything not a file
confs = [f for f in confs
if os.path.isfile(os.path.join(confd, f))]
# Load them all so that they can be merged
cfgs = []
for fn in confs:
cfgs.append(read_conf(os.path.join(confd, fn)))
return mergemanydict(cfgs)
def read_conf_with_confd(cfgfile):
cfg = read_conf(cfgfile)
confd = False
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
if not isinstance(confd, (str, basestring)):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
if not confd or not os.path.isdir(confd):
return cfg
# Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
return mergemanydict([confd_cfg, cfg])
def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
# cc: <yaml content here> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
if cmdline is None:
cmdline = get_cmdline()
tag_begin = "cc:"
tag_end = "end_cc"
begin_l = len(tag_begin)
end_l = len(tag_end)
clen = len(cmdline)
tokens = []
begin = cmdline.find(tag_begin)
while begin >= 0:
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
"\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
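# Illustrative example of the 'cc:' / 'end_cc' extraction above (comment
# only; the kernel command line shown is an assumption):
#   >>> read_cc_from_cmdline("root=/dev/sda ro cc:runcmd: [ls]end_cc quiet")
#   'runcmd: [ls]'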
def dos2unix(contents):
# find first end of line
pos = contents.find('\n')
if pos <= 0 or contents[pos - 1] != '\r':
return contents
return contents.replace('\r\n', '\n')
def get_hostname_fqdn(cfg, cloud):
# return the hostname and fqdn from 'cfg'. If not found in cfg,
# then fall back to data from cloud
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
fqdn = cfg['fqdn']
hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
else:
if "hostname" in cfg and cfg['hostname'].find('.') > 0:
# user specified hostname, and it had '.' in it
# be nice to them. set fqdn and hostname from that
fqdn = cfg['hostname']
hostname = cfg['hostname'][:fqdn.find('.')]
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True)
if "hostname" in cfg:
hostname = cfg['hostname']
else:
hostname = cloud.get_hostname()
return (hostname, fqdn)
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
"""
For each host a single line should be present with
the following information:
IP_address canonical_hostname [aliases...]
Fields of the entry are separated by any number of blanks and/or tab
characters. Text from a "#" character until the end of the line is a
comment, and is ignored. Host names may contain only alphanumeric
characters, minus signs ("-"), and periods ("."). They must begin with
an alphabetic character and end with an alphanumeric character.
Optional aliases provide for name changes, alternate spellings, shorter
hostnames, or generic hostnames (for example, localhost).
"""
fqdn = None
try:
for line in load_file(filename).splitlines():
hashpos = line.find("#")
if hashpos >= 0:
line = line[0:hashpos]
line = line.strip()
if not line:
continue
            # If there are fewer than 3 entries
# (IP_address, canonical_hostname, alias)
# then ignore this line
toks = line.split()
if len(toks) < 3:
continue
if hostname in toks[2:]:
fqdn = toks[1]
break
except IOError:
pass
return fqdn
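# Illustrative example (comment only; the hosts entry is an assumption):
# given an /etc/hosts line "192.168.1.5 myhost.example.com myhost",
# get_fqdn_from_hosts("myhost") returns "myhost.example.com".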
def get_cmdline_url(names=('cloud-config-url', 'url'),
starts="#cloud-config", cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
data = keyval_str_to_dict(cmdline)
url = None
key = None
for key in names:
if key in data:
url = data[key]
break
if not url:
return (None, None, None)
resp = read_file_or_url(url)
if resp.contents.startswith(starts) and resp.ok():
return (key, url, str(resp))
return (key, url, None)
def is_resolvable(name):
"""determine if a url is resolvable, return a boolean
    This also attempts to be resilient against dns redirection.
    Note that normal nsswitch resolution is used here. So in order
    to avoid any utilization of 'search' entries in /etc/resolv.conf
    we have to append '.'.
    The top level 'invalid' domain is invalid per RFC, and example.com
should also not exist. The random entry will be resolved inside
the search list.
"""
global _DNS_REDIRECT_IP # pylint: disable=W0603
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
rand_str())
badresults = {}
for iname in badnames:
try:
result = socket.getaddrinfo(iname, None, 0, 0,
socket.SOCK_STREAM, socket.AI_CANONNAME)
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
badips.add(sockaddr[0])
except (socket.gaierror, socket.error):
pass
_DNS_REDIRECT_IP = badips
if badresults:
LOG.debug("detected dns redirection: %s", badresults)
try:
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
addr = result[0][4][0]
if addr in _DNS_REDIRECT_IP:
return False
return True
except (socket.gaierror, socket.error):
return False
def get_hostname():
hostname = socket.gethostname()
return hostname
def gethostbyaddr(ip):
try:
return socket.gethostbyaddr(ip)[0]
except socket.herror:
return None
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return is_resolvable(urlparse.urlparse(url).hostname)
def search_for_mirror(candidates):
"""
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
for cand in candidates:
try:
if is_resolvable_url(cand):
return cand
except Exception:
pass
return None
def close_stdin():
"""
reopen stdin as /dev/null so even subprocesses or other os level things get
/dev/null as input.
if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
value then input will not be closed (useful for debugging).
"""
if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
return
with open(os.devnull) as fp:
os.dup2(fp.fileno(), sys.stdin.fileno())
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
TYPE=<filesystem>
LABEL=<label>
UUID=<uuid>
"""
blk_id_cmd = ['blkid']
options = []
if criteria:
# Search for block devices with tokens named NAME that
# have the value 'value' and display any devices which are found.
# Common values for NAME include TYPE, LABEL, and UUID.
# If there are no devices specified on the command line,
# all block devices will be searched; otherwise,
# only search the devices specified by the user.
options.append("-t%s" % (criteria))
if tag:
# For each (specified) device, show only the tags that match tag.
options.append("-s%s" % (tag))
if no_cache:
# If you want to start with a clean cache
# (i.e. don't report devices previously scanned
# but not necessarily available at this time), specify /dev/null.
options.extend(["-c", "/dev/null"])
if oformat:
# Display blkid's output using the specified format.
# The format parameter may be:
# full, value, list, device, udev, export
options.append('-o%s' % (oformat))
if path:
options.append(path)
cmd = blk_id_cmd + options
# See man blkid for why 2 is added
(out, _err) = subp(cmd, rcs=[0, 2])
entries = []
for line in out.splitlines():
line = line.strip()
if line:
entries.append(line)
return entries
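# Illustrative usage (comment only; the returned device names depend on the
# system, '/dev/vdb' is an assumption):
#   find_devs_with("LABEL=cidata")   # runs: blkid -tLABEL=cidata -odevice
#   find_devs_with("TYPE=iso9660")   # runs: blkid -tTYPE=iso9660 -odevice
# each returning a list of matching device paths such as ['/dev/vdb'].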
def peek_file(fname, max_bytes):
LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
with open(fname, 'rb') as ifh:
return ifh.read(max_bytes)
def uniq_list(in_list):
out_list = []
for i in in_list:
if i in out_list:
continue
else:
out_list.append(i)
return out_list
def load_file(fname, read_cb=None, quiet=False):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
ofh = StringIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
except IOError as e:
if not quiet:
raise
if e.errno != errno.ENOENT:
raise
contents = ofh.getvalue()
LOG.debug("Read %s bytes from %s", len(contents), fname)
return contents
def get_cmdline():
if 'DEBUG_PROC_CMDLINE' in os.environ:
cmdline = os.environ["DEBUG_PROC_CMDLINE"]
else:
try:
cmdline = load_file("/proc/cmdline").strip()
except:
cmdline = ""
return cmdline
def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
bytes_piped = 0
while True:
data = in_fh.read(chunk_size)
if data == '':
break
else:
out_fh.write(data)
bytes_piped += len(data)
if chunk_cb:
chunk_cb(bytes_piped)
out_fh.flush()
return bytes_piped
def chownbyid(fname, uid=None, gid=None):
if uid in [None, -1] and gid in [None, -1]:
# Nothing to do
return
LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
os.chown(fname, uid, gid)
def chownbyname(fname, user=None, group=None):
uid = -1
gid = -1
try:
if user:
uid = pwd.getpwnam(user).pw_uid
if group:
gid = grp.getgrnam(group).gr_gid
except KeyError as e:
raise OSError("Unknown user or group: %s" % (e))
chownbyid(fname, uid, gid)
# Always returns well formatted values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
# init: /var/log/cloud.out
# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
# final:
# output: "| logger -p"
# error: "> /dev/null"
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
ret = [None, None]
if not cfg or not 'output' in cfg:
return ret
outcfg = cfg['output']
if mode in outcfg:
modecfg = outcfg[mode]
else:
if 'all' not in outcfg:
return ret
# if there is a 'all' item in the output list
# then it applies to all users of this (init, config, final)
modecfg = outcfg['all']
# if value is a string, it specifies stdout and stderr
if isinstance(modecfg, str):
ret = [modecfg, modecfg]
# if its a list, then we expect (stdout, stderr)
if isinstance(modecfg, list):
if len(modecfg) > 0:
ret[0] = modecfg[0]
if len(modecfg) > 1:
ret[1] = modecfg[1]
# if it is a dictionary, expect 'out' and 'error'
# items, which indicate out and error
if isinstance(modecfg, dict):
if 'output' in modecfg:
ret[0] = modecfg['output']
if 'error' in modecfg:
ret[1] = modecfg['error']
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
if ret[1] == "&1":
ret[1] = ret[0]
swlist = [">>", ">", "|"]
for i in range(len(ret)):
if not ret[i]:
continue
val = ret[i].lstrip()
found = False
for s in swlist:
if val.startswith(s):
val = "%s %s" % (s, val[len(s):].strip())
found = True
break
if not found:
# default behavior is append
val = "%s %s" % (">>", val.strip())
ret[i] = val
return ret
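# Illustrative example of the 'output' handling above (comment only; the
# config shown is an assumption):
#   >>> get_output_cfg({'output': {'all': '| tee -a /var/log/cloud.out'}}, 'init')
#   ['| tee -a /var/log/cloud.out', '| tee -a /var/log/cloud.out']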
def logexc(log, msg, *args):
# Setting this here allows this to change
# levels easily (not always error level)
# or even desirable to have that much junk
# coming out to a non-debug stream
if msg:
log.warn(msg, *args)
# Debug gets the full trace
log.debug(msg, exc_info=1, *args)
def hash_blob(blob, routine, mlen=None):
hasher = hashlib.new(routine)
hasher.update(blob)
digest = hasher.hexdigest()
    # Don't get too long now
if mlen is not None:
return digest[0:mlen]
else:
return digest
def is_user(name):
try:
if pwd.getpwnam(name):
return True
except KeyError:
return False
def is_group(name):
try:
if grp.getgrnam(name):
return True
except KeyError:
return False
def rename(src, dest):
LOG.debug("Renaming %s to %s", src, dest)
# TODO(harlowja) use a se guard here??
os.rename(src, dest)
def ensure_dirs(dirlist, mode=0755):
for d in dirlist:
ensure_dir(d, mode)
def read_write_cmdline_url(target_fn):
if not os.path.exists(target_fn):
try:
(key, url, content) = get_cmdline_url()
except:
logexc(LOG, "Failed fetching command line url")
return
try:
if key and content:
write_file(target_fn, content, mode=0600)
LOG.debug(("Wrote to %s with contents of command line"
" url %s (len=%s)"), target_fn, url, len(content))
elif key and not content:
LOG.debug(("Command line key %s with url"
" %s had no contents"), key, url)
except:
logexc(LOG, "Failed writing url content to %s", target_fn)
def yaml_dumps(obj):
formatted = yaml.dump(obj,
line_break="\n",
indent=4,
explicit_start=True,
explicit_end=True,
default_flow_style=False)
return formatted
def ensure_dir(path, mode=None):
if not os.path.isdir(path):
# Make the dir and adjust the mode
with SeLinuxGuard(os.path.dirname(path), recursive=True):
os.makedirs(path)
chmod(path, mode)
else:
# Just adjust the mode
chmod(path, mode)
@contextlib.contextmanager
def unmounter(umount):
try:
yield umount
finally:
if umount:
umount_cmd = ["umount", '-l', umount]
subp(umount_cmd)
def mounts():
mounted = {}
try:
# Go through mounts to see what is already mounted
if os.path.exists("/proc/mounts"):
mount_locs = load_file("/proc/mounts").splitlines()
method = 'proc'
else:
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
method = 'mount'
mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
for mpline in mount_locs:
# Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
# FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
try:
if method == 'proc':
(dev, mp, fstype, opts, _freq, _passno) = mpline.split()
else:
m = re.search(mountre, mpline)
dev = m.group(1)
mp = m.group(2)
fstype = m.group(3)
opts = m.group(4)
except:
continue
# If the name of the mount point contains spaces these
# can be escaped as '\040', so undo that..
mp = mp.replace("\\040", " ")
mounted[dev] = {
'fstype': fstype,
'mountpoint': mp,
'opts': opts,
}
LOG.debug("Fetched %s mounts from %s", mounted, method)
except (IOError, OSError):
logexc(LOG, "Failed fetching mount points")
return mounted
def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
returned. If data != None, also pass data to callback.
"""
mounted = mounts()
with tempdir() as tmpd:
umount = False
if device in mounted:
mountpoint = mounted[device]['mountpoint']
else:
try:
mountcmd = ['mount']
mountopts = []
if rw:
mountopts.append('rw')
else:
mountopts.append('ro')
if sync:
# This seems like the safe approach to do
# (ie where this is on by default)
mountopts.append("sync")
if mountopts:
mountcmd.extend(["-o", ",".join(mountopts)])
if mtype:
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
subp(mountcmd)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
except (IOError, OSError) as exc:
raise MountFailedError(("Failed mounting %s "
"to %s due to: %s") %
(device, tmpd, exc))
# Be nice and ensure it ends with a slash
if not mountpoint.endswith("/"):
mountpoint += "/"
with unmounter(umount):
if data is None:
ret = callback(mountpoint)
else:
ret = callback(mountpoint, data)
return ret
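# Illustrative usage sketch (comment only; the device and file name are
# assumptions, not from the original source):
#   def _read_hostname(mountpoint):
#       return load_file(mountpoint + "etc/hostname")
#   contents = mount_cb("/dev/vdb", _read_hostname, mtype="iso9660")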
def get_builtin_cfg():
# Deep copy so that others can't modify
return obj_copy.deepcopy(CFG_BUILTIN)
def sym_link(source, link, force=False):
LOG.debug("Creating symbolic link from %r => %r", link, source)
if force and os.path.exists(link):
del_file(link)
os.symlink(source, link)
def del_file(path):
LOG.debug("Attempting to remove %s", path)
try:
os.unlink(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
def copy(src, dest):
LOG.debug("Copying %s to %s", src, dest)
shutil.copy(src, dest)
def time_rfc2822():
try:
ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
except:
ts = "??"
return ts
def uptime():
uptime_str = '??'
method = 'unknown'
try:
if os.path.exists("/proc/uptime"):
method = '/proc/uptime'
contents = load_file("/proc/uptime").strip()
if contents:
uptime_str = contents.split()[0]
elif os.path.exists("/usr/sbin/acct/fwtmp"): # for AIX support
method = '/usr/sbin/acct/fwtmp'
import commands
contents = commands.getoutput('/usr/sbin/acct/fwtmp < /var/adm/wtmp | /usr/bin/grep "system boot" 2>/dev/null')
if contents:
bootup = contents.splitlines()[-1].split()[6]
now = time.time()
uptime_str = now - float(bootup)
else:
method = 'ctypes'
libc = ctypes.CDLL('/lib/libc.so.7')
size = ctypes.c_size_t()
buf = ctypes.c_int()
size.value = ctypes.sizeof(buf)
libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
ctypes.byref(size), None, 0)
now = time.time()
bootup = buf.value
uptime_str = now - bootup
except:
logexc(LOG, "Unable to read uptime using method: %s" % method)
return uptime_str
def append_file(path, content):
write_file(path, content, omode="ab", mode=None)
def ensure_file(path, mode=0644):
write_file(path, content='', omode="ab", mode=mode)
def safe_int(possible_int):
try:
return int(possible_int)
except (ValueError, TypeError):
return None
def chmod(path, mode):
real_mode = safe_int(mode)
if path and real_mode:
with SeLinuxGuard(path):
os.chmod(path, real_mode)
def write_file(filename, content, mode=0644, omode="wb"):
"""
Writes a file with the given content and sets the file mode as specified.
    Restores the SELinux context if possible.
@param filename: The full path of the file to write.
@param content: The content to write to the file.
@param mode: The filesystem mode to set on the file.
@param omode: The open mode used when opening the file (r, rb, a, etc.)
"""
ensure_dir(os.path.dirname(filename))
LOG.debug("Writing to %s - %s: [%s] %s bytes",
filename, omode, mode, len(content))
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
fh.flush()
chmod(filename, mode)
def delete_dir_contents(dirname):
"""
Deletes all contents of a directory without deleting the directory itself.
@param dirname: The directory whose contents should be deleted.
"""
for node in os.listdir(dirname):
node_fullpath = os.path.join(dirname, node)
if os.path.isdir(node_fullpath):
del_dir(node_fullpath)
else:
del_file(node_fullpath)
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
logstring=False):
if rcs is None:
rcs = [0]
try:
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
" (shell=%s, capture=%s)"), args, rcs, shell, capture)
else:
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
if not capture:
stdout = None
stderr = None
else:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
stdin = subprocess.PIPE
sp = subprocess.Popen(args, stdout=stdout,
stderr=stderr, stdin=stdin,
env=env, shell=shell)
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
rc = sp.returncode # pylint: disable=E1101
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
cmd=args)
# Just ensure blank instead of none?? (iff capturing)
if not out and capture:
out = ''
if not err and capture:
err = ''
return (out, err)
def make_header(comment_char="#", base='created'):
ci_ver = version.version_string()
header = str(comment_char)
header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
header += " on %s" % time_rfc2822()
return header
def abs_join(*paths):
return os.path.abspath(os.path.join(*paths))
# shellify, takes a list of commands
# for each entry in the list
# if it is an array, shell protect it (with single ticks)
# if it is a string, do nothing
def shellify(cmdlist, add_header=True):
content = ''
if add_header:
content += "#!/bin/sh\n"
escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
cmds_made = 0
for args in cmdlist:
# If the item is a list, wrap all items in single tick.
# If its not, then just write it directly.
if isinstance(args, list):
fixed = []
for f in args:
fixed.append("'%s'" % (str(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
elif isinstance(args, (str, basestring)):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
raise RuntimeError(("Unable to shellify type %s"
" which is not a list or string")
% (type_utils.obj_name(args)))
LOG.debug("Shellified %s commands.", cmds_made)
return content
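# Illustrative example (comment only; the command list is an assumption):
#   >>> shellify([["echo", "hello world"], "ls -l"])
#   "#!/bin/sh\n'echo' 'hello world'\nls -l\n"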
def strip_prefix_suffix(line, prefix=None, suffix=None):
if prefix and line.startswith(prefix):
line = line[len(prefix):]
if suffix and line.endswith(suffix):
line = line[:-len(suffix)]
return line
def is_container():
"""
    Checks to see if this code is running in a container of some sort
"""
for helper in CONTAINER_TESTS:
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
subp([helper])
return True
except (IOError, OSError):
pass
# this code is largely from the logic in
# ubuntu's /etc/init/container-detect.conf
try:
# Detect old-style libvirt
# Detect OpenVZ containers
pid1env = get_proc_env(1)
if "container" in pid1env:
return True
if "LIBVIRT_LXC_UUID" in pid1env:
return True
except (IOError, OSError):
pass
# Detect OpenVZ containers
if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
return True
try:
# Detect Vserver containers
lines = load_file("/proc/self/status").splitlines()
for line in lines:
if line.startswith("VxID:"):
(_key, val) = line.strip().split(":", 1)
if val != "0":
return True
except (IOError, OSError):
pass
return False
def get_proc_env(pid):
"""
Return the environment in a dict that a given process id was started with.
"""
env = {}
fn = os.path.join("/proc/", str(pid), "environ")
try:
contents = load_file(fn)
toks = contents.split("\x00")
for tok in toks:
if tok == "":
continue
(name, val) = tok.split("=", 1)
if name:
env[name] = val
except (IOError, OSError):
pass
return env
def keyval_str_to_dict(kvstring):
ret = {}
for tok in kvstring.split():
try:
(key, val) = tok.split("=", 1)
except ValueError:
key = tok
val = True
ret[key] = val
return ret
def is_partition(device):
if device.startswith("/dev/"):
device = device[5:]
return os.path.isfile("/sys/class/block/%s/partition" % device)
def expand_package_list(version_fmt, pkgs):
# we will accept tuples, lists of tuples, or just plain lists
if not isinstance(pkgs, list):
pkgs = [pkgs]
pkglist = []
for pkg in pkgs:
if isinstance(pkg, basestring):
pkglist.append(pkg)
continue
if isinstance(pkg, (tuple, list)):
if len(pkg) < 1 or len(pkg) > 2:
raise RuntimeError("Invalid package & version tuple.")
if len(pkg) == 2 and pkg[1]:
pkglist.append(version_fmt % tuple(pkg))
continue
pkglist.append(pkg[0])
else:
raise RuntimeError("Invalid package type.")
return pkglist
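# Illustrative example (comment only; the package names are assumptions):
#   >>> expand_package_list("%s=%s", ["vim", ("git", "1:2.7")])
#   ['vim', 'git=1:2.7']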
def parse_mount_info(path, mountinfo_lines, log=LOG):
"""Return the mount information for PATH given the lines from
/proc/$$/mountinfo."""
path_elements = [e for e in path.split('/') if e]
devpth = None
fs_type = None
match_mount_point = None
match_mount_point_elements = None
for i, line in enumerate(mountinfo_lines):
parts = line.split()
# Completely fail if there is anything in any line that is
# unexpected, as continuing to parse past a bad line could
# cause an incorrect result to be returned, so it's better
# return nothing than an incorrect result.
# The minimum number of elements in a valid line is 10.
if len(parts) < 10:
            log.debug("Line %d has too few columns (%d): %s",
i + 1, len(parts), line)
return None
mount_point = parts[4]
mount_point_elements = [e for e in mount_point.split('/') if e]
# Ignore mounts deeper than the path in question.
if len(mount_point_elements) > len(path_elements):
continue
# Ignore mounts where the common path is not the same.
l = min(len(mount_point_elements), len(path_elements))
if mount_point_elements[0:l] != path_elements[0:l]:
continue
# Ignore mount points higher than an already seen mount
# point.
if (match_mount_point_elements is not None and
len(match_mount_point_elements) > len(mount_point_elements)):
continue
# Find the '-' which terminates a list of optional columns to
# find the filesystem type and the path to the device. See
# man 5 proc for the format of this file.
try:
i = parts.index('-')
except ValueError:
log.debug("Did not find column named '-' in line %d: %s",
i + 1, line)
return None
# Get the path to the device.
try:
fs_type = parts[i + 1]
devpth = parts[i + 2]
except IndexError:
log.debug("Too few columns after '-' column in line %d: %s",
i + 1, line)
return None
match_mount_point = mount_point
match_mount_point_elements = mount_point_elements
if devpth and fs_type and match_mount_point:
return (devpth, fs_type, match_mount_point)
else:
return None
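# Illustrative worked example (comment only; the mountinfo line is an
# assumption written in the /proc/$$/mountinfo format):
#   line = "1 1 252:1 / / rw,relatime shared:1 - ext4 /dev/vda1 rw"
#   parse_mount_info("/", [line]) == ('/dev/vda1', 'ext4', '/')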
def parse_mtab(path):
"""On older kernels there's no /proc/$$/mountinfo, so use mtab."""
for line in load_file("/etc/mtab").splitlines():
devpth, mount_point, fs_type = line.split()[:3]
if mount_point == path:
return devpth, fs_type, mount_point
return None
def parse_mount(path):
(mountoutput, _err) = subp("mount")
mount_locs = mountoutput.splitlines()
for line in mount_locs:
m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
devpth = m.group(1)
mount_point = m.group(2)
fs_type = m.group(3)
if mount_point == path:
return devpth, fs_type, mount_point
return None
def get_mount_info(path, log=LOG):
# Use /proc/$$/mountinfo to find the device where path is mounted.
# This is done because with a btrfs filesystem using os.stat(path)
# does not return the ID of the device.
#
# Here, / has a device of 18 (decimal).
#
# $ stat /
# File: '/'
# Size: 234 Blocks: 0 IO Block: 4096 directory
# Device: 12h/18d Inode: 256 Links: 1
# Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
# Access: 2013-01-13 07:31:04.358011255 +0000
# Modify: 2013-01-13 18:48:25.930011255 +0000
# Change: 2013-01-13 18:48:25.930011255 +0000
# Birth: -
#
# Find where / is mounted:
#
# $ mount | grep ' / '
# /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
#
# And the device ID for /dev/vda1 is not 18:
#
# $ ls -l /dev/vda1
# brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
#
# So use /proc/$$/mountinfo to find the device underlying the
# input path.
mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
if os.path.exists(mountinfo_path):
lines = load_file(mountinfo_path).splitlines()
return parse_mount_info(path, lines, log)
elif os.path.exists("/etc/mtab"):
return parse_mtab(path)
else:
return parse_mount(path)
def which(program):
# Return path of program for execution if found in path
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
_fpath, _ = os.path.split(program)
if _fpath:
if is_exe(program):
return program
else:
for path in os.environ.get("PATH", "").split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
if args is None:
args = []
if kwargs is None:
kwargs = {}
start = time.time()
ustart = None
if get_uptime:
try:
ustart = float(uptime())
except ValueError:
pass
try:
ret = func(*args, **kwargs)
finally:
delta = time.time() - start
udelta = None
if ustart is not None:
try:
udelta = float(uptime()) - ustart
except ValueError:
pass
tmsg = " took %0.3f seconds" % delta
if get_uptime:
if isinstance(udelta, (float)):
tmsg += " (%0.2f)" % udelta
else:
tmsg += " (N/A)"
try:
logfunc(msg + tmsg)
except:
pass
return ret
def expand_dotted_devname(dotted):
toks = dotted.rsplit(".", 1)
if len(toks) > 1:
return toks
else:
return (dotted, None)
def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
# return a dictionary populated with keys in 'required' and 'optional'
# by reading files in prefix + delim + entry
if required is None:
required = []
if optional is None:
optional = []
missing = []
ret = {}
for f in required + optional:
try:
ret[f] = load_file(base + delim + f, quiet=False)
except IOError as e:
if e.errno != errno.ENOENT:
raise
if f in required:
missing.append(f)
if len(missing):
        raise ValueError("Missing required files: %s" % ','.join(missing))
return ret
|
gpl-3.0
| -5,898,552,899,357,104,000
| 29.063017
| 123
| 0.560221
| false
| 3.752789
| false
| false
| false
|
google/ldif
|
ldif/util/math_util.py
|
1
|
3602
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for mathematical operations."""
import itertools
import math
import numpy as np
import tensorflow as tf
def int_log2(i):
"""Computes the floor of the base 2 logarithm of an integer."""
log2 = 0
while i >= 2:
log2 += 1
i = i >> 1
return log2
def nonzero_mean(tensor):
"""The mean over nonzero values in a tensor."""
num = tf.reduce_sum(tensor)
denom = tf.cast(tf.count_nonzero(tensor), dtype=tf.float32)
denom = tf.where(denom == 0.0, 1e-8, denom)
return tf.divide(num, denom)
def increase_frequency(t, out_dim, flatten=False, interleave=True):
"""Maps elements of a tensor to a higher frequency, higher dimensional space.
As shown in NeRF (https://arxiv.org/pdf/2003.08934.pdf), this can help
networks learn higher frequency functions more easily since they are typically
biased to low frequency functions. By increasing the frequency of the input
signal, such biases are mitigated.
Args:
t: Tensor with any shape. Type tf.float32. The normalization of the input
dictates how many dimensions are needed to avoid periodicity. The NeRF
paper normalizes all inputs to the range [0, 1], which is safe.
out_dim: How many (sine, cosine) pairs to generate for each element of t.
Referred to as 'L' in NeRF. Integer.
flatten: Whether to flatten the output tensor to have the same rank as t.
Boolean. See returns section for details.
interleave: Whether to interleave the sin and cos results, as described in
the paper. If true, then the vector will contain [sin(2^0*t_i*pi),
cos(2^0*t_i*pi), sin(2^1*t_i*pi), ...]. If false, some operations will be
avoided, but the order will be [sin(2^0*t_i*pi), sin(2^1*t_i*pi), ...,
cos(2^0*t_i*pi), cos(2^1*t_i*pi), ...].
Returns:
Tensor of type tf.float32. Has shape [..., out_dim*2] if flatten is false.
If flatten is true, then if t has shape [..., N] then the output will have
shape [..., N*out_dim*2].
"""
# TODO(kgenova) Without a custom kernel this is somewhat less efficient,
# because the sin and cos results have to be next to one another in the output
# but tensorflow only allows computing them with two different ops. Thus it is
# necessary to do some expensive tf.concats. It probably won't be a bottleneck
# in most pipelines.
t = math.pi * t
scales = np.power(2, np.arange(out_dim, dtype=np.int32)).astype(np.float32)
t_rank = len(t.shape)
scale_shape = [1] * t_rank + [out_dim]
scales = tf.constant(np.reshape(scales, scale_shape), dtype=tf.float32)
scaled = tf.expand_dims(t, axis=-1) * scales
sin = tf.sin(scaled)
cos = tf.cos(scaled)
output = tf.concat([sin, cos], axis=-1)
if interleave:
sines = tf.unstack(sin, axis=-1)
cosines = tf.unstack(cos, axis=-1)
output = tf.stack(list(itertools.chain(*zip(sines, cosines))), axis=-1)
if flatten:
t_shape = t.get_shape().as_list()
output = tf.reshape(output, t_shape[:-1] + [t_shape[-1] * out_dim * 2])
return output
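# Illustrative shape check (comment only; the shapes are assumptions):
#   t = tf.zeros([8, 3])  # e.g. normalized xyz coordinates in [0, 1]
#   increase_frequency(t, out_dim=4).shape               -> [8, 3, 8]
#   increase_frequency(t, out_dim=4, flatten=True).shape -> [8, 24]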
|
apache-2.0
| -8,889,287,189,012,497,000
| 39.022222
| 80
| 0.691838
| false
| 3.391714
| false
| false
| false
|
lundjordan/slaveapi
|
slaveapi/clients/bugzilla.py
|
1
|
2329
|
from ..global_state import bugzilla_client
import logging
import urllib
from requests import HTTPError
log = logging.getLogger(__name__)
class Bug(object):
def __init__(self, id_, loadInfo=True):
self.id_ = id_
self.data = {}
if loadInfo:
self.refresh()
def refresh(self):
try:
self.data = bugzilla_client.get_bug(self.id_)
self.id_ = self.data["id"]
except HTTPError, e:
log.debug('HTTPError - %s' % e)
def add_comment(self, comment, data={}):
return bugzilla_client.add_comment(self.id_, comment, data)
def update(self, data):
return bugzilla_client.update_bug(self.id_, data)
class ProblemTrackingBug(Bug):
product = "Release Engineering"
component = "Buildduty"
def __init__(self, slave_name, *args, **kwargs):
self.slave_name = slave_name
self.reboot_bug = None
Bug.__init__(self, slave_name, *args, **kwargs)
def create(self):
data = {
"product": self.product,
"component": self.component,
"summary": "%s problem tracking" % self.slave_name,
"version": "other",
"alias": self.slave_name,
# todo: do we care about setting these correctly?
"op_sys": "All",
"platform": "All"
}
resp = bugzilla_client.create_bug(data)
self.id_ = resp["id"]
reboot_product = "Infrastructure & Operations"
reboot_component = "DCOps"
reboot_summary = "%(slave)s is unreachable"
def get_reboot_bug(slave):
qs = "?product=%s&component=%s" % (urllib.quote(reboot_product), urllib.quote(reboot_component))
qs += "&blocks=%s&resolution=---" % slave.bug.id_
summary = reboot_summary % {"slave": slave.name}
for bug in bugzilla_client.request("GET", "bug" + qs)["bugs"]:
if bug["summary"] == summary:
return Bug(bug["id"])
else:
return None
def file_reboot_bug(slave):
data = {
"product": reboot_product,
"component": reboot_component,
"summary": reboot_summary % {"slave": slave.name},
"version": "other",
"op_sys": "All",
"platform": "All",
"blocks": slave.bug.id_,
}
resp = bugzilla_client.create_bug(data)
return Bug(resp["id"])
|
mpl-2.0
| 151,448,738,641,104,300
| 29.246753
| 100
| 0.573637
| false
| 3.65047
| false
| false
| false
|
antoine-de/navitia
|
source/jormungandr/jormungandr/modules/v1_routing/v1_routing.py
|
1
|
10328
|
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.interfaces.v1 import Uri, Coverage, Journeys, GraphicalIsochrone, \
HeatMap, Schedules, Places, Ptobjects, Coord, Disruptions, Calendars, \
converters_collection_type, Status, GeoStatus, JSONSchema, LineReports
from werkzeug.routing import BaseConverter, FloatConverter, PathConverter
from jormungandr.modules_loader import AModule
from jormungandr import app
from jormungandr.modules.v1_routing.resources import Index
class RegionConverter(BaseConverter):
""" The region you want to query"""
type_ = str
regex = '[^(/;)]+'
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
class LonConverter(FloatConverter):
""" The longitude of where the coord you want to query"""
type_ = float
regex = '-?\\d+(\\.\\d+)?'
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
class LatConverter(FloatConverter):
""" The latitude of where the coord you want to query"""
type_ = float
regex = '-?\\d+(\\.\\d+)?'
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
class UriConverter(PathConverter):
"""First part of the uri"""
type_ = str
def __init__(self, *args, **kwargs):
PathConverter.__init__(self, *args, **kwargs)
class IdConverter(BaseConverter):
"""Id of the object you want to query"""
type_ = str
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
class V1Routing(AModule):
def __init__(self, api, name):
super(V1Routing, self).__init__(api, name,
description='Current version of navitia API',
status='current',
index_endpoint='index')
def setup(self):
self.api.app.url_map.converters['region'] = RegionConverter
self.api.app.url_map.converters['lon'] = LonConverter
self.api.app.url_map.converters['lat'] = LatConverter
self.api.app.url_map.converters['uri'] = UriConverter
self.api.app.url_map.converters['id'] = IdConverter
self.api.app.url_map.strict_slashes = False
self.module_resources_manager.register_resource(Index.Index())
self.add_resource(Index.Index,
'/',
'',
endpoint='index')
self.module_resources_manager.register_resource(Index.TechnicalStatus())
self.add_resource(Index.TechnicalStatus,
'/status',
endpoint='technical_status')
lon_lat = '<lon:lon>;<lat:lat>/'
coverage = '/coverage/'
region = coverage + '<region:region>/'
coord = coverage + lon_lat
self.add_resource(Coverage.Coverage,
coverage,
region,
coord,
endpoint='coverage')
self.add_resource(Coord.Coord,
'/coord/' + lon_lat,
'/coords/' + lon_lat,
endpoint='coord')
collecs = list(converters_collection_type.collections_to_resource_type.keys())
for collection in collecs:
# we want to hide the connections apis, as they are only for debug
hide = collection == 'connections'
self.add_resource(getattr(Uri, collection)(True),
region + collection,
coord + collection,
region + '<uri:uri>/' + collection,
coord + '<uri:uri>/' + collection,
endpoint=collection + '.collection', hide=hide)
if collection == 'connections':
# connections api cannot be query by id
continue
self.add_resource(getattr(Uri, collection)(False),
region + collection + '/<id:id>',
coord + collection + '/<id:id>',
region + '<uri:uri>/' + collection + '/<id:id>',
coord + '<uri:uri>/' + collection + '/<id:id>',
endpoint=collection + '.id', hide=hide)
collecs = ["routes", "lines", "line_groups", "networks", "stop_areas", "stop_points",
"vehicle_journeys"]
for collection in collecs:
self.add_resource(getattr(Uri, collection)(True),
'/' + collection,
endpoint=collection + '.external_codes')
self.add_resource(Places.Places,
region + 'places',
coord + 'places',
'/places',
endpoint='places')
self.add_resource(Ptobjects.Ptobjects,
region + 'pt_objects',
coord + 'pt_objects',
endpoint='pt_objects')
self.add_resource(Places.PlaceUri,
'/places/<id:id>',
region + 'places/<id:id>',
coord + 'places/<id:id>',
endpoint='place_uri')
self.add_resource(Places.PlacesNearby,
region + 'places_nearby',
coord + 'places_nearby',
region + '<uri:uri>/places_nearby',
coord + '<uri:uri>/places_nearby',
'/coord/' + lon_lat + 'places_nearby',
'/coords/' + lon_lat + 'places_nearby',
endpoint='places_nearby')
self.add_resource(Journeys.Journeys,
region + '<uri:uri>/journeys',
coord + '<uri:uri>/journeys',
region + 'journeys',
coord + 'journeys',
'/journeys',
endpoint='journeys',
# we don't want to document those routes as we consider them deprecated
hide_routes=(region + '<uri:uri>/journeys', coord + '<uri:uri>/journeys'))
if app.config['GRAPHICAL_ISOCHRONE']:
self.add_resource(GraphicalIsochrone.GraphicalIsochrone,
region + 'isochrones',
endpoint='isochrones')
if app.config.get('HEAT_MAP'):
self.add_resource(HeatMap.HeatMap,
region + 'heat_maps',
endpoint='heat_maps')
self.add_resource(Schedules.RouteSchedules,
region + '<uri:uri>/route_schedules',
coord + '<uri:uri>/route_schedules',
'/route_schedules',
endpoint='route_schedules')
self.add_resource(Schedules.NextArrivals,
region + '<uri:uri>/arrivals',
coord + '<uri:uri>/arrivals',
region + 'arrivals',
coord + 'arrivals',
endpoint='arrivals')
self.add_resource(Schedules.NextDepartures,
region + '<uri:uri>/departures',
coord + '<uri:uri>/departures',
region + 'departures',
coord + 'departures',
endpoint='departures')
self.add_resource(Schedules.StopSchedules,
region + '<uri:uri>/stop_schedules',
coord + '<uri:uri>/stop_schedules',
'/stop_schedules',
endpoint='stop_schedules')
self.add_resource(Disruptions.TrafficReport,
region + 'traffic_reports',
region + '<uri:uri>/traffic_reports',
endpoint='traffic_reports')
self.add_resource(LineReports.LineReports,
region + 'line_reports',
region + '<uri:uri>/line_reports',
endpoint='line_reports')
self.add_resource(Status.Status,
region + 'status',
endpoint='status')
self.add_resource(GeoStatus.GeoStatus,
region + '_geo_status',
endpoint='geo_status')
self.add_resource(Calendars.Calendars,
region + 'calendars',
region + '<uri:uri>/calendars',
region + "calendars/<id:id>",
endpoint="calendars")
self.add_resource(JSONSchema.Schema,
'/schema',
endpoint="schema")
|
agpl-3.0
| -5,484,635,130,334,410,000
| 40.477912
| 100
| 0.512006
| false
| 4.51596
| false
| false
| false
|
Tocknicsu/nctuoj
|
backend/service/verdict.py
|
1
|
5475
|
from service.base import BaseService
from req import Service
from utils.form import form_validation
import os
import shutil
import config
class VerdictService(BaseService):
def __init__(self, db, rs):
super().__init__(db, rs)
VerdictService.inst = self
def get_verdict_list(self, data={}):
# res = self.rs.get('verdict_list')
# if res: return (None, res)
required_args = [{
'name': 'problem_id',
'type': int,
}]
err = form_validation(data, required_args)
if err: return (err, None)
sql = "SELECT v.*, u.account as setter_user FROM verdicts as v, users as u WHERE v.setter_user_id=u.id"
param = tuple()
if 'problem_id' in data and data['problem_id']:
sql += ' AND (v.problem_id=%s OR v.problem_id=0)'
param = (data['problem_id'],)
res = yield self.db.execute(sql, param)
res = res.fetchall()
# self.rs.set('verdict_list', res)
return (None, res)
def get_verdict_type(self):
# res = self.rs.get('verdict_type')
# if res: return (None, res)
res = { x['id']: x for x in (yield self.db.execute("SELECT * FROM map_verdict_string order by id"))}
# self.rs.set('verdict_type', res)
return (None, res)
def get_verdict(self, data={}):
required_args = [{
'name': '+id',
'type': int,
}]
err = form_validation(data, required_args)
if err: return (err, None)
if int(data['id']) == 0:
col = ['id', 'title', 'execute_type_id', 'execute_type_id', 'file_name', 'setter_user_id']
res = {x: '' for x in col}
res['id'] = 0
return (None, res)
# res = self.rs.get('verdict@%s'%str(data['id']))
# if res: return (None, res)
res = yield self.db.execute('SELECT v.*, u.account as setter_user FROM verdicts as v, users as u WHERE v.id=%s AND v.setter_user_id=u.id;', (data['id'],))
if res.rowcount == 0:
return ((404, 'No Verdict ID'), None)
res = res.fetchone()
err, res['execute_type'] = yield from Service.Execute.get_execute({'id': res['execute_type_id']})
folder = '%s/data/verdicts/%s/' % (config.DATAROOT, str(res['id']))
file_path = '%s/%s' % (folder, res['file_name'])
try: os.makedirs(folder)
except: pass
with open(file_path) as f:
res['code'] = f.read()
res['code_line'] = len(open(file_path).readlines())
# self.rs.set('verdict@%s'%(str(data['id'])), res)
return (None, res)
def post_verdict(self ,data={}):
required_args = [{
'name': '+title',
'type': str,
}, {
'name': '+execute_type_id',
'type': int,
}, {
'name': '+setter_user_id',
'type': int,
}, {
'name': '+code_file',
}]
err = form_validation(data, required_args)
if err: return (err, None)
code_file = None
if data['code_file'] is None:
return ((400, 'No code file'), None)
data['file_name'] = data['code_file']['filename']
code_file = data.pop('code_file')
sql, param = self.gen_insert_sql('verdicts', data)
id = (yield self.db.execute(sql, param)).fetchone()['id']
if code_file:
folder = '%s/data/verdicts/%s/' % (config.DATAROOT, str(id))
file_path = '%s/%s' % (folder, data['file_name'])
try: shutil.rmtree(folder)
except: pass
try: os.makedirs(folder)
except: pass
with open(file_path, 'wb+') as f:
f.write(code_file['body'])
return (None, str(id))
def put_verdict(self ,data={}):
required_args = [{
'name': '+id',
'type': int,
}, {
'name': '+title',
'type': str,
}, {
'name': '+execute_type_id',
'type': int,
}, {
'name': '+setter_user_id',
'type': int,
}, {
'name': 'code_file',
}]
err = form_validation(data, required_args)
if err: return (err, None)
code_file = data.pop('code_file')
if code_file: data['file_name'] = code_file['filename']
sql, param = self.gen_update_sql('verdicts', data)
id = data.pop('id')
yield self.db.execute(sql+' WHERE id=%s;', param+(id,))
if code_file:
folder = '%s/data/verdicts/%s/' % (config.DATAROOT, str(id))
file_path = '%s/%s' % (folder, data['file_name'])
try: shutil.rmtree(folder)
except: pass
try: os.makedirs(folder)
except: pass
with open(file_path, 'wb+') as f:
f.write(code_file['body'])
# self.rs.delete('verdict@%s'%(str(id)))
# self.rs.delete('verdict_list')
return (None, str(id))
def delete_verdict(self, data={}):
required_args = [{
'name': '+id',
'type': int,
}]
err = form_validation(data, required_args)
if err: return (err, None)
yield self.db.execute('DELETE FROM verdicts WHERE id=%s;', (data['id'],))
# self.rs.delete('verdict_list')
# self.rs.delete('verdict@%s'%(str(data['id'])))
return (None, str(data['id']))
|
mit
| -6,884,955,607,823,547,000
| 35.019737
| 162
| 0.498265
| false
| 3.518638
| false
| false
| false
|
gynvael/stream
|
017-osdev-06/build.py
|
1
|
1266
|
#!/usr/bin/python
import os
import subprocess
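# This build script targets Python 2: it relies on print statements, truncating
# integer division with "/", str-based byte handling in fix_stage1_size(), and
# func_name on function objects.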
def fix_stage1_size():
stage2_size = os.stat("stage2").st_size
kernel_size = os.stat("kernel64").st_size
stage2_size = (stage2_size + kernel_size + 511) / 512
if stage2_size >= 255:
raise Exception("stage2 & kernel are too large")
with open("stage1", "rb+") as f:
d = f.read()
idx = d.index("\xb0\xcc\x90\x90")
d = bytearray(d)
d[idx+1] = stage2_size
f.seek(0)
f.write(d)
cc_flags = "-std=c99 -nostdlib -o kernel64 -O3 -Wall -Wextra -masm=intel"
cmds_to_run = [
"gcc kernel.c " + cc_flags,
"strip kernel64",
"nasm stage1.asm",
"nasm stage2.asm",
fix_stage1_size
]
files_to_img = [
"stage1",
"stage2",
"kernel64"
]
for cmd in cmds_to_run:
if type(cmd) is str:
print "Running:", cmd
print subprocess.check_output(cmd, shell=True)
else:
print "Calling:", cmd.func_name
cmd()
buf = []
for fn in files_to_img:
with open(fn, "rb") as f:
d = f.read()
buf.append(d)
if len(d) % 512 == 0:
continue
padding_size = 512 - len(d) % 512
buf.append("\0" * padding_size);
with open("floppy.bin", "wb") as f:
f.write(''.join(buf))
|
mit
| -4,116,388,021,985,915,400
| 19.1
| 73
| 0.554502
| false
| 2.825893
| false
| false
| false
|
googleapis/python-dialogflow-cx
|
google/cloud/dialogflowcx_v3/types/entity_type.py
|
1
|
13987
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dialogflow.cx.v3",
manifest={
"EntityType",
"ListEntityTypesRequest",
"ListEntityTypesResponse",
"GetEntityTypeRequest",
"CreateEntityTypeRequest",
"UpdateEntityTypeRequest",
"DeleteEntityTypeRequest",
},
)
class EntityType(proto.Message):
r"""Entities are extracted from user input and represent parameters that
are meaningful to your application. For example, a date range, a
proper name such as a geographic location or landmark, and so on.
Entities represent actionable data for your application.
When you define an entity, you can also include synonyms that all
map to that entity. For example, "soft drink", "soda", "pop", and so
on.
There are three types of entities:
- **System** - entities that are defined by the Dialogflow API for
common data types such as date, time, currency, and so on. A
system entity is represented by the ``EntityType`` type.
- **Custom** - entities that are defined by you that represent
actionable data that is meaningful to your application. For
example, you could define a ``pizza.sauce`` entity for red or
white pizza sauce, a ``pizza.cheese`` entity for the different
types of cheese on a pizza, a ``pizza.topping`` entity for
different toppings, and so on. A custom entity is represented by
the ``EntityType`` type.
- **User** - entities that are built for an individual user such as
favorites, preferences, playlists, and so on. A user entity is
represented by the
[SessionEntityType][google.cloud.dialogflow.cx.v3.SessionEntityType]
type.
For more information about entity types, see the `Dialogflow
documentation <https://cloud.google.com/dialogflow/docs/entities-overview>`__.
Attributes:
name (str):
The unique identifier of the entity type. Required for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.UpdateEntityType].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
display_name (str):
Required. The human-readable name of the
entity type, unique within the agent.
kind (google.cloud.dialogflowcx_v3.types.EntityType.Kind):
Required. Indicates the kind of entity type.
auto_expansion_mode (google.cloud.dialogflowcx_v3.types.EntityType.AutoExpansionMode):
Indicates whether the entity type can be
automatically expanded.
entities (Sequence[google.cloud.dialogflowcx_v3.types.EntityType.Entity]):
The collection of entity entries associated
with the entity type.
excluded_phrases (Sequence[google.cloud.dialogflowcx_v3.types.EntityType.ExcludedPhrase]):
Collection of exceptional words and phrases that shouldn't
be matched. For example, if you have a size entity type with
entry ``giant``\ (an adjective), you might consider adding
``giants``\ (a noun) as an exclusion. If the kind of entity
type is ``KIND_MAP``, then the phrases specified by entities
and excluded phrases should be mutually exclusive.
enable_fuzzy_extraction (bool):
Enables fuzzy entity extraction during
classification.
redact (bool):
Indicates whether parameters of the entity
type should be redacted in log. If redaction is
enabled, page parameters and intent parameters
referring to the entity type will be replaced by
parameter name when logging.
"""
class Kind(proto.Enum):
r"""Represents kinds of entities."""
KIND_UNSPECIFIED = 0
KIND_MAP = 1
KIND_LIST = 2
KIND_REGEXP = 3
class AutoExpansionMode(proto.Enum):
r"""Represents different entity type expansion modes. Automated
expansion allows an agent to recognize values that have not been
explicitly listed in the entity (for example, new kinds of
shopping list items).
"""
AUTO_EXPANSION_MODE_UNSPECIFIED = 0
AUTO_EXPANSION_MODE_DEFAULT = 1
class Entity(proto.Message):
r"""An **entity entry** for an associated entity type.
Attributes:
value (str):
Required. The primary value associated with this entity
entry. For example, if the entity type is *vegetable*, the
value could be *scallions*.
For ``KIND_MAP`` entity types:
- A canonical value to be used in place of synonyms.
For ``KIND_LIST`` entity types:
- A string that can contain references to other entity
types (with or without aliases).
synonyms (Sequence[str]):
Required. A collection of value synonyms. For example, if
the entity type is *vegetable*, and ``value`` is
*scallions*, a synonym could be *green onions*.
For ``KIND_LIST`` entity types:
- This collection must contain exactly one synonym equal to
``value``.
"""
value = proto.Field(proto.STRING, number=1,)
synonyms = proto.RepeatedField(proto.STRING, number=2,)
class ExcludedPhrase(proto.Message):
r"""An excluded entity phrase that should not be matched.
Attributes:
value (str):
Required. The word or phrase to be excluded.
"""
value = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
kind = proto.Field(proto.ENUM, number=3, enum=Kind,)
auto_expansion_mode = proto.Field(proto.ENUM, number=4, enum=AutoExpansionMode,)
entities = proto.RepeatedField(proto.MESSAGE, number=5, message=Entity,)
excluded_phrases = proto.RepeatedField(
proto.MESSAGE, number=6, message=ExcludedPhrase,
)
enable_fuzzy_extraction = proto.Field(proto.BOOL, number=7,)
redact = proto.Field(proto.BOOL, number=9,)
class ListEntityTypesRequest(proto.Message):
r"""The request message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.cx.v3.EntityTypes.ListEntityTypes].
Attributes:
parent (str):
Required. The agent to list all entity types for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
language_code (str):
The language to list entity types for. The following fields
are language dependent:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
page_size (int):
The maximum number of items to return in a
single page. By default 100 and at most 1000.
page_token (str):
The next_page_token value returned from a previous list
request.
"""
parent = proto.Field(proto.STRING, number=1,)
language_code = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListEntityTypesResponse(proto.Message):
r"""The response message for
[EntityTypes.ListEntityTypes][google.cloud.dialogflow.cx.v3.EntityTypes.ListEntityTypes].
Attributes:
entity_types (Sequence[google.cloud.dialogflowcx_v3.types.EntityType]):
The list of entity types. There will be a maximum number of
items returned based on the page_size field in the request.
next_page_token (str):
Token to retrieve the next page of results,
or empty if there are no more results in the
list.
"""
@property
def raw_page(self):
return self
entity_types = proto.RepeatedField(proto.MESSAGE, number=1, message="EntityType",)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.GetEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.GetEntityType].
Attributes:
name (str):
Required. The name of the entity type. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
language_code (str):
The language to retrieve the entity type for. The following
fields are language dependent:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
name = proto.Field(proto.STRING, number=1,)
language_code = proto.Field(proto.STRING, number=2,)
class CreateEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.CreateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.CreateEntityType].
Attributes:
parent (str):
Required. The agent to create a entity type for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
entity_type (google.cloud.dialogflowcx_v3.types.EntityType):
Required. The entity type to create.
language_code (str):
The language of the following fields in ``entity_type``:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
"""
parent = proto.Field(proto.STRING, number=1,)
entity_type = proto.Field(proto.MESSAGE, number=2, message="EntityType",)
language_code = proto.Field(proto.STRING, number=3,)
class UpdateEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.UpdateEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.UpdateEntityType].
Attributes:
entity_type (google.cloud.dialogflowcx_v3.types.EntityType):
Required. The entity type to update.
language_code (str):
The language of the following fields in ``entity_type``:
- ``EntityType.entities.value``
- ``EntityType.entities.synonyms``
- ``EntityType.excluded_phrases.value``
If not specified, the agent's default language is used.
`Many
languages <https://cloud.google.com/dialogflow/cx/docs/reference/language>`__
are supported. Note: languages must be enabled in the agent
before they can be used.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The mask to control which fields get updated.
"""
entity_type = proto.Field(proto.MESSAGE, number=1, message="EntityType",)
language_code = proto.Field(proto.STRING, number=2,)
update_mask = proto.Field(
proto.MESSAGE, number=3, message=field_mask_pb2.FieldMask,
)
class DeleteEntityTypeRequest(proto.Message):
r"""The request message for
[EntityTypes.DeleteEntityType][google.cloud.dialogflow.cx.v3.EntityTypes.DeleteEntityType].
Attributes:
name (str):
Required. The name of the entity type to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/entityTypes/<Entity Type ID>``.
force (bool):
This field has no effect for entity type not being used. For
entity types that are used by intents or pages:
- If ``force`` is set to false, an error will be returned
with message indicating the referencing resources.
- If ``force`` is set to true, Dialogflow will remove the
entity type, as well as any references to the entity type
(i.e. Page
[parameter][google.cloud.dialogflow.cx.v3.Form.Parameter]
of the entity type will be changed to '@sys.any' and
intent
[parameter][google.cloud.dialogflow.cx.v3.Intent.Parameter]
of the entity type will be removed).
"""
name = proto.Field(proto.STRING, number=1,)
force = proto.Field(proto.BOOL, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| 1,964,519,015,032,665,000
| 40.017595
| 109
| 0.645028
| false
| 4.285233
| false
| false
| false
|
opennode/nodeconductor
|
waldur_core/cost_tracking/tasks.py
|
1
|
2343
|
from celery import shared_task
from waldur_core.cost_tracking import CostTrackingRegister, models
from waldur_core.structure import models as structure_models
@shared_task(name='waldur_core.cost_tracking.recalculate_estimate')
def recalculate_estimate(recalculate_total=False):
""" Recalculate price of consumables that were used by resource until now.
Regular task. It is too expensive to calculate consumed price on each
request, so we store cached price each hour.
If recalculate_total is True - task also recalculates total estimate
for current month.
"""
# Celery does not import server.urls and does not discover cost tracking modules.
# So they should be discovered implicitly.
CostTrackingRegister.autodiscover()
# Step 1. Recalculate resources estimates.
for resource_model in CostTrackingRegister.registered_resources:
for resource in resource_model.objects.all():
_update_resource_consumed(resource, recalculate_total=recalculate_total)
# Step 2. Move from down to top and recalculate consumed estimate for each
# object based on its children.
ancestors_models = [m for m in models.PriceEstimate.get_estimated_models()
if not issubclass(m, structure_models.ResourceMixin)]
for model in ancestors_models:
for ancestor in model.objects.all():
_update_ancestor_consumed(ancestor)
def _update_resource_consumed(resource, recalculate_total):
price_estimate, created = models.PriceEstimate.objects.get_or_create_current(scope=resource)
if created:
models.ConsumptionDetails.objects.create(price_estimate=price_estimate)
price_estimate.create_ancestors()
price_estimate.update_total()
elif recalculate_total:
price_estimate.update_total()
price_estimate.update_consumed()
def _update_ancestor_consumed(ancestor):
price_estimate, _ = models.PriceEstimate.objects.get_or_create_current(scope=ancestor)
resource_descendants = [descendant for descendant in price_estimate.get_descendants()
if isinstance(descendant.scope, structure_models.ResourceMixin)]
price_estimate.consumed = sum([descendant.consumed for descendant in resource_descendants])
price_estimate.save(update_fields=['consumed'])
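# Illustrative sketch (an assumption, not taken from this repository): since
# recalculate_estimate is described as a regular task that caches the consumed
# price every hour, a deployment would typically register it with Celery beat.
# The schedule entry name and the use of app.conf.beat_schedule are assumptions
# about the surrounding setup.
#
#   from celery.schedules import crontab
#
#   app.conf.beat_schedule = {
#       'cost-tracking-recalculate-estimate': {
#           'task': 'waldur_core.cost_tracking.recalculate_estimate',
#           'schedule': crontab(minute=0),  # hourly, at the top of the hour
#           'kwargs': {'recalculate_total': False},
#       },
#   }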
|
mit
| 8,238,126,947,783,257,000
| 47.8125
| 96
| 0.729834
| false
| 4.379439
| false
| false
| false
|
wodo/WebTool3
|
webtool/server/models/event.py
|
1
|
20122
|
# -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import date, time
from .reference import Reference
from .equipment import Equipment
from .approximate import Approximate
from .mixins import SeasonMixin, DescriptionMixin
from .time_base import TimeMixin
from . import fields
class EventManager(models.Manager):
def get_by_natural_key(self, season, reference):
reference = Reference.get_reference(reference, season)
return reference.event
class Event(SeasonMixin, TimeMixin, DescriptionMixin, models.Model):
"""
The option (blank=True, default='') for CharField describes an optional element
field == '' => data is not available
field != '' => data is Valid
The option (blank=True, null=True) for the other fields describes an optional element
field is None => data is not available
field is not None => data is Valid
"""
objects = EventManager()
# noinspection PyUnresolvedReferences
reference = models.OneToOneField(
'Reference',
primary_key=True,
verbose_name='Buchungscode',
related_name='event',
on_delete=models.PROTECT,
)
location = fields.LocationField()
reservation_service = models.BooleanField(
'Reservierungswunsch für Schulungsraum',
db_index=True,
blank=True, default=False
)
start_date = models.DateField(
'Abreisetag',
db_index=True
)
start_time = models.TimeField(
'Abreisezeit (Genau)',
blank=True, null=True,
help_text="Je nach Abreisezeit wird eventuell Urlaub benötgit",
)
# approximate is valid only if start_time is None
approximate = models.ForeignKey(
Approximate,
db_index=True,
verbose_name='Abreisezeit (Ungefähr)',
related_name='event_list',
blank=True, null=True,
help_text="Je nach Abreisezeit wird eventuell Urlaub benötigt",
on_delete=models.PROTECT,
)
end_date = models.DateField(
'Rückkehr',
blank=True, null=True,
help_text="Nur wenn die Veranstaltung mehr als einen Tag dauert",
)
end_time = models.TimeField(
'Rückkehrzeit',
blank=True, null=True,
help_text="z.B. Ungefähr bei Touren/Kursen - Genau bei Vorträgen",
)
link = models.URLField(
'Beschreibung',
blank=True, default='',
help_text="Eine URL zur Veranstaltungsbeschreibung auf der Homepage",
)
map = models.FileField(
'Kartenausschnitt',
blank=True, default='',
help_text="Eine URL zu einem Kartenausschnitt des Veranstaltungsgebietes",
)
distal = models.BooleanField(
'Mit gemeinsamer Anreise',
db_index=True,
blank=True, default=False,
)
# rendezvous, source and distance valid only, if distal_event == True
rendezvous = fields.LocationField(
'Treffpunkt',
help_text="Treffpunkt für die Abfahrt z.B. Edelweissparkplatz",
)
source = fields.LocationField(
'Ausgangsort',
help_text="Treffpunkt vor Ort",
)
public_transport = models.BooleanField(
'Öffentliche Verkehrsmittel',
db_index=True,
blank=True, default=False
)
# distance valid only, if public_transport == False
distance = fields.DistanceField()
# lea valid only, if public_transport == True
lea = models.BooleanField(
'Low Emission Adventure',
db_index=True,
blank=True, default=False
)
new = models.BooleanField(
'Markierung für Neue Veranstaltungen',
db_index=True,
blank=True, default=False
)
shuttle_service = models.BooleanField(
'Reservierungswunsch für AlpinShuttle',
db_index=True,
blank=True, default=False
)
# check event.season == instruction.topic.season
# noinspection PyUnresolvedReferences
instruction = models.ForeignKey(
'Instruction',
db_index=True,
blank=True, null=True,
verbose_name='Kurs',
related_name='meeting_list',
on_delete=models.PROTECT,
)
def natural_key(self):
return self.season.name, str(self.reference)
natural_key.dependencies = ['server.season', 'server.reference']
def __str__(self):
if hasattr(self, 'meeting') and not self.meeting.is_special:
title = self.meeting.topic.title
else:
title = self.title
return "{} - {}, {} [{}]".format(self.reference, title, self.long_date(with_year=True), self.season.name)
def long_date(self, with_year=False, with_time=False):
"""
:param with_year: False
5. September
22. bis 25. Januar
28. Mai bis 3. Juni
30. Dezember 2016 bis 6. Januar 2017
:param with_year: True
5. September 2016
22. bis 25. Januar 2016
28. Mai bis 3. Juni 2016
30. Dezember 2016 bis 6. Januar 2017
:return: long formatted date
"""
y = ' Y' if with_year else ''
if self.end_date is None or self.start_date == self.end_date:
value = date(self.start_date, "j. F" + y)
if with_time and self.start_time:
if self.end_time is None or self.start_time == self.end_time:
if self.start_time.minute:
if self.start_time.minute < 10:
minute = time(self.start_time, "i")[1:]
else:
minute = time(self.start_time, "i")
value = "{}, {}.{}".format(value, time(self.start_time, "G"), minute)
else:
value = "{}, {}".format(value, time(self.start_time, "G"))
                else:
                    if self.end_time.minute:
                        if self.end_time.minute < 10:
                            minute = time(self.end_time, "i")[1:]
                        else:
                            minute = time(self.end_time, "i")
                        value = "{}, {} bis {}.{}".format(value, time(self.start_time, "G"), time(self.end_time, "G"), minute)
                    else:
                        value = "{}, {} bis {}".format(value, time(self.start_time, "G"), time(self.end_time, "G"))
value = "{} Uhr".format(value)
return value
elif self.start_date.month == self.end_date.month and self.start_date.year == self.end_date.year:
return "{0} bis {1}".format(date(self.start_date, "j."), date(self.end_date, "j. F" + y))
elif self.start_date.month != self.end_date.month:
y0 = ''
if self.start_date.year != self.end_date.year:
y0 = y = ' Y'
return "{0} bis {1}".format(date(self.start_date, "j. F" + y0), date(self.end_date, "j. F" + y))
def short_date(self, with_year=False):
"""
:param with_year: False
05.09.
22.01 - 25.01.
28.05. - 03.06.
:param with_year: True
05.09.2016
22.01.2016 - 25.01.2016
28.05.2016 - 03.06.2016
:return: short formatted date
"""
y = 'Y' if with_year else ''
if self.end_date is None or self.start_date == self.end_date:
return date(self.start_date, "d.m." + y)
return "{0} - {1}".format(date(self.start_date, "d.m." + y), date(self.end_date, "d.m." + y))
def departure(self):
"""
{start_date}, {start_time}, {rendezvous}, Heimkehr am {end_date} gegen {end_time} Uhr
"""
season_year = int(self.season.name)
with_year = season_year != self.start_date.year or (self.end_date and season_year != self.end_date.year)
y = 'Y' if with_year else ''
start_date = date(self.start_date, "j.n." + y)
if self.start_time:
if self.start_time.minute:
start_time = time(self.start_time, "G.i")
else:
start_time = time(self.start_time, "G")
start_time = "{} Uhr".format(start_time)
else:
start_time = self.approximate.name if self.approximate else ''
if self.end_date and self.end_date != self.start_date:
end_date = date(self.end_date, "j.n." + y)
else:
end_date = ''
if self.end_time:
if self.end_time.minute:
end_time = time(self.end_time, "G.i")
else:
end_time = time(self.end_time, "G")
else:
end_time = ''
departure = "{}, {}".format(start_date, start_time)
if self.rendezvous:
departure = "{}, {}".format(departure, self.rendezvous)
if end_time:
departure = "{}, Heimkehr".format(departure)
if end_date:
departure = "{} am {}".format(departure, end_date)
departure = "{} gegen {} Uhr".format(departure, end_time)
return departure
def appointment(self):
"""
{start_date}, {start_time} Uhr, {name}, {location}, {rendezvous}
{start_date}, {start_time} bis {end_time} Uhr, {name}, {location}, {rendezvous}
{start_date}, {start_time} Uhr bis {end_date}, {end_time} Uhr, {name}, {location}, {rendezvous}
{start_date}, {start_time} Uhr bis {end_date}, {name}, {location}, {rendezvous}
"""
appointment = ''
season_year = int(self.season.name)
with_year = season_year != self.start_date.year or (self.end_date and season_year != self.end_date.year)
y = 'Y' if with_year else ''
start_date = date(self.start_date, "j.n." + y)
end_date = date(self.end_date, "j.n." + y) if self.end_date else ''
approximate = ''
if self.start_time:
if self.start_time.minute:
start_time = time(self.start_time, "G.i")
else:
start_time = time(self.start_time, "G")
elif self.approximate:
start_time = ''
approximate = self.approximate.name
else:
start_time = ''
if self.end_time:
if self.end_time.minute:
end_time = time(self.end_time, "G.i")
else:
end_time = time(self.end_time, "G")
else:
end_time = ''
if start_time:
appointment = "{}, {}".format(start_date, start_time)
if end_time:
if end_date:
appointment = "{} Uhr bis {}, {} Uhr".format(appointment, end_date, end_time)
else:
appointment = "{} bis {} Uhr".format(appointment, end_time)
else:
appointment = "{} Uhr".format(appointment)
if approximate:
appointment = "{}, {}".format(start_date, approximate)
if self.name:
appointment = "{}, {}".format(appointment, self.name)
if self.location:
appointment = "{}, {}".format(appointment, self.location)
if self.rendezvous:
appointment = "{}, {}".format(appointment, self.rendezvous)
return appointment
def prefixed_date(self, prefix, formatter, with_year=False):
"""
Beispiel: "Anmeldung bis 10.03."
:param prefix:
:param formatter: a unbound methode like short_date or long_date
:param with_year:
:return:
"""
return "{} {}".format(prefix, formatter(self, with_year))
@property
def activity(self):
if hasattr(self, 'tour') and self.tour:
return "tour"
if hasattr(self, 'talk') and self.talk:
return "talk"
if hasattr(self, 'meeting') and self.meeting:
return "topic"
if hasattr(self, 'session') and self.session:
return "collective"
@property
def division(self):
winter = self.reference.category.winter
summer = self.reference.category.summer
indoor = self.reference.category.climbing
if winter and not summer and not indoor:
return "winter"
elif not winter and summer and not indoor:
return "summer"
elif not winter and not summer and indoor:
return "indoor"
else:
return "misc"
@property
def state(self):
if hasattr(self, 'tour') and self.tour:
state = self.tour.state
elif hasattr(self, 'talk') and self.talk:
state = self.talk.state
elif hasattr(self, 'meeting') and self.meeting:
state = self.meeting.state
elif hasattr(self, 'session') and self.session:
state = self.session.state
else:
return None
if state:
if state.done:
return "done"
if state.moved:
return "moved"
if state.canceled:
return "canceled"
if state.unfeasible:
return "unfeasible"
if state.public:
return "public"
else:
return "private"
@property
def quantity(self):
if hasattr(self, 'tour') and self.tour:
min_quantity = self.tour.min_quantity
max_quantity = self.tour.max_quantity
cur_quantity = self.tour.cur_quantity
elif hasattr(self, 'talk') and self.talk:
min_quantity = self.talk.min_quantity
max_quantity = self.talk.max_quantity
cur_quantity = self.talk.cur_quantity
elif hasattr(self, 'meeting') and self.meeting:
min_quantity = self.meeting.min_quantity
max_quantity = self.meeting.max_quantity
cur_quantity = self.meeting.cur_quantity
else:
return None
return {
"min": min_quantity,
"max": max_quantity,
"current": cur_quantity
}
@property
def admission(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.admission
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.admission
if hasattr(self, 'talk') and self.talk:
return self.talk.admission
@property
def extra_charges(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.extra_charges
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.extra_charges
@property
def extra_charges_info(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.extra_charges_info
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.extra_charges_info
@property
def advances(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.advances
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.advances
@property
def advances_info(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.advances_info
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.advances_info
@property
def speaker(self):
if hasattr(self, 'talk') and self.talk:
return self.talk.speaker
if hasattr(self, 'session') and self.session:
return self.session.speaker
return None
@property
def guide(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.guide
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.guide
if hasattr(self, 'session') and self.session:
return self.session.guide
@property
def guides(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.guides()
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.guides()
if hasattr(self, 'session') and self.session:
return self.session.guides()
@property
def skill(self):
skill = None
if hasattr(self, 'tour') and self.tour:
skill = self.tour.skill
if hasattr(self, 'session') and self.session:
            skill = self.session.skill if self.session.skill and self.session.skill.code != "x" else None
return skill.order if skill else None
@property
def fitness(self):
fitness = None
if hasattr(self, 'tour') and self.tour:
fitness = self.tour.fitness
if hasattr(self, 'session') and self.session:
            fitness = self.session.fitness if self.session.fitness and self.session.fitness.code != "x" else None
return fitness.order if fitness else None
@property
def ladies_only(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.ladies_only
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.ladies_only
if hasattr(self, 'session') and self.session:
return self.session.ladies_only
return False
@property
def youth_on_tour(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.youth_on_tour
return False
@property
def preconditions(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.preconditions
if hasattr(self, 'meeting') and self.meeting:
if self.meeting.is_special:
return self.meeting.preconditions
else:
return self.meeting.topic.preconditions
return None
@property
def equipments(self):
equipments = Equipment.objects.none()
misc = ''
if hasattr(self, 'tour') and self.tour:
equipments = self.tour.equipments
misc = self.tour.misc_equipment
if hasattr(self, 'meeting') and self.meeting:
if self.meeting.is_special:
equipments = self.meeting.equipments
misc = self.meeting.misc_equipment
else:
equipments = self.meeting.topic.equipments
misc = self.meeting.topic.misc_equipment
if hasattr(self, 'session') and self.session:
equipments = self.session.equipments
misc = self.session.misc_equipment
equipment_list = []
for equipment in equipments.all():
equipment_list.append(dict(code=equipment.code, name=equipment.name))
equipments = {}
if equipment_list:
equipments.update(dict(list=equipment_list))
if misc:
equipments.update(dict(misc=misc))
return equipments if equipments else None
@property
def team(self):
team = None
if hasattr(self, 'tour') and self.tour:
team = self.tour.team
if hasattr(self, 'meeting') and self.meeting:
team = self.meeting.team
if hasattr(self, 'session') and self.session:
team = self.session.team
return ', '.join(team.values_list('user__username', flat=True)) if team else None
@property
def subject(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.subject()
if hasattr(self, 'talk') and self.talk:
return self.talk.subject()
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.subject()
if hasattr(self, 'session') and self.session:
return self.session.subject()
@property
def details(self):
if hasattr(self, 'tour') and self.tour:
return self.tour.details()
if hasattr(self, 'talk') and self.talk:
return self.talk.details()
if hasattr(self, 'meeting') and self.meeting:
return self.meeting.details()
if hasattr(self, 'session') and self.session:
return self.session.details()
class Meta:
get_latest_by = "updated"
verbose_name = "Veranstaltungstermin"
verbose_name_plural = "Veranstaltungstermine"
ordering = ('start_date', )
|
bsd-2-clause
| -9,152,673,757,879,513,000
| 32.855219
| 113
| 0.564147
| false
| 3.728908
| false
| false
| false
|
deepmind/open_spiel
|
open_spiel/python/bots/is_mcts_test.py
|
1
|
2548
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for Information Set MCTS bot.
This test mimics the basic C++ tests in algorithms/is_mcts_test.cc.
"""
# pylint: disable=g-unreachable-test-method
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from open_spiel.python.algorithms import evaluate_bots
import pyspiel
SEED = 12983641
class ISMCTSBotTest(absltest.TestCase):
def ismcts_play_game(self, game):
evaluator = pyspiel.RandomRolloutEvaluator(1, SEED)
for final_policy_type in [
pyspiel.ISMCTSFinalPolicyType.NORMALIZED_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VISIT_COUNT,
pyspiel.ISMCTSFinalPolicyType.MAX_VALUE
]:
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, -1, final_policy_type,
False, False)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, 10, final_policy_type,
False, False)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
bot = pyspiel.ISMCTSBot(SEED, evaluator, 5.0, 1000, 10, final_policy_type,
True, True)
bots = [bot] * game.num_players()
evaluate_bots.evaluate_bots(game.new_initial_state(), bots, np.random)
def test_basic_sim_kuhn(self):
game = pyspiel.load_game("kuhn_poker")
self.ismcts_play_game(game)
game = pyspiel.load_game("kuhn_poker(players=3)")
self.ismcts_play_game(game)
def test_basic_sim_leduc(self):
game = pyspiel.load_game("leduc_poker")
self.ismcts_play_game(game)
game = pyspiel.load_game("leduc_poker(players=3)")
self.ismcts_play_game(game)
if __name__ == "__main__":
np.random.seed(SEED)
absltest.main()
|
apache-2.0
| 1,481,365,194,119,847,400
| 35.4
| 80
| 0.68956
| false
| 3.287742
| true
| false
| false
|
googleinterns/contextual-adjectives
|
generate_noun_to_adj_list/noun_to_adj_gen.py
|
1
|
4308
|
"""Code to generate noun to adjective dictionary"""
import nltk
from nltk.tokenize.treebank import TreebankWordTokenizer
from bert_setup import Bert
class NounToAdjGen:
"""Add adjectives for nouns in dictionary noun_to_adj.
Attributes:
noun_to_adj : Noun to adjective dictionary.
tokenizer : An instance of nltk's tokenizer.
bert_model : An instance of class bert.
adj_tags : Tags of adjectives in nltk.
noun_tags : Tags of nouns in nltk.
noun_list : List of nouns that we are working on.
adj_list : List of adjectives that we are working on.
"""
def __init__(self, noun_list, adj_list):
"""Initializing noun to adjective dictionary."""
self.noun_to_adj = {}
for noun in noun_list:
self.noun_to_adj[noun] = []
# Use nltk treebank tokenizer
self.tokenizer = TreebankWordTokenizer()
# Initializing the bert class
self.bert_model = Bert()
# https://pythonprogramming.net/natural-language-toolkit-nltk-part-speech-tagging/
self.adj_tags = ['JJ', 'JJR', 'JJS']
self.noun_tags = ['NN', 'NNS', 'NNP', 'NNPS']
self.noun_list = noun_list
self.adj_list = adj_list
def add_to_dictionary(self, sentences, num_of_perturb):
"""Add adjectives for nouns by perturbing sentence to noun_to_adj.
Args:
sentences : The list of sentences for which to look up for nouns and adjs.
num_of_perturb : Number of perturbations you want to make for a word in a sentence
"""
for sent in sentences:
# Tokenizing and POS tagging the sentence
pos_inf = nltk.tag.pos_tag(self.tokenizer.tokenize(sent))
for idx, (word, tag) in enumerate(pos_inf):
word = word.lower()
if tag in self.noun_tags and word in self.noun_list:
valid_adj_index = []
if idx != 0:
valid_adj_index.append(idx-1)
if idx != (len(pos_inf)-1):
valid_adj_index.append(idx+1)
for adj_index in valid_adj_index:
word1, tag1 = pos_inf[adj_index]
word1 = word1.lower()
if tag1 in self.adj_tags and word1 in self.adj_list:
self.add_adjectives(sent, num_of_perturb, adj_index, word)
self.add_nouns(sent, num_of_perturb, idx, word1)
elif tag1 in self.adj_tags:
self.add_adjectives(sent, num_of_perturb, adj_index, word)
def add_adjectives(self, sent, num_of_perturb, adj_index, word):
"""Ask bert for suggestions for more adjectives and add their intersection
with adjectives list to the dictionary.
Args:
sent : The sentence for which use bert to find more adjectives.
num_of_perturb : Number of perturbations you want to make for a word in a sentence
adj_index : The index of the word need to be perturbed in the sentence.
word : The noun for which we are looking for adjectives
"""
token_score = self.bert_model.perturb_bert(sent, num_of_perturb, adj_index)
new_words = list(token_score.keys())
intersection = list(set(new_words) & set(self.adj_list))
intersection = [(a, token_score[a]) for a in intersection]
self.noun_to_adj[word].extend(intersection)
def add_nouns(self, sent, num_of_perturb, noun_index, word):
"""Ask bert for suggestions for more nouns and add their intersection with nouns
list to the dictionary.
Args:
sent : The sentence for which use bert to find more adjectives.
num_of_perturb : Number of perturbations you want to make for a word in a sentence
adj_index : The index of the word need to be perturbed in the sentence.
word : The noun for which we are looking for adjectives
"""
token_score = self.bert_model.perturb_bert(sent, num_of_perturb, noun_index)
new_words = list(token_score.keys())
for n_word in new_words:
if n_word in self.noun_list:
self.noun_to_adj[n_word].append((word, token_score[n_word]))
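# Illustrative usage sketch (an assumption, not part of the original module):
# the word lists, sentence, and num_of_perturb value below are made up to show
# how the class is meant to be driven.
#
#   gen = NounToAdjGen(noun_list=['pizza'], adj_list=['spicy', 'cold'])
#   gen.add_to_dictionary(['The spicy pizza arrived cold.'], num_of_perturb=10)
#   print(gen.noun_to_adj)  # e.g. {'pizza': [('spicy', score), ...]}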
|
apache-2.0
| 7,461,075,055,532,946,000
| 47.41573
| 90
| 0.600743
| false
| 3.682051
| false
| false
| false
|
globus-labs/ripple
|
ripple/observers/ipc/ipc_observer.py
|
1
|
5264
|
import time
import subprocess
import uuid
import json
from ripple.observers.base_observer import BaseObserver
from ripple import logger, RippleConfig
class IPCObserver(BaseObserver):
"""
Set up the polling IPC monitor. It will use the
"ipcs" command to query for shared memory segments
and will report those that have been created and removed.
"""
def monitor(self):
self.segments = {}
self.poll(True)
while True:
time.sleep(5)
self.poll(False)
def poll(self, start=False):
"""
Use the ipcs command to get memory events and compare
them against active rules.
"""
segments = self.get_segments(start)
events = self.process_segments(segments)
print (events)
# now process the events against rules
for event in events:
self.check_rules(event)
def check_rules(self, event):
"""
        Try to match a rule to this event. If nothing matches, return None.
        Shared memory segment entries (as reported by ``ipcs``) look like this:
key shmid owner perms bytes nattch status
0x00000000 262145 ryan 600 393216 2 dest
"""
logger.debug("Checking rules")
# Iterate through rules and try to apply them
for rule in RippleConfig().rules[:]:
event_type = event['type']
if self.match_condition(event_type, rule):
# Currently putting in pathname as key, need to
# think of a better way to handle "other" information
send_event = {'event': {
'type': event_type,
'size': event['bytes'],
'key': event['key'],
'pathname': event['key'],
'path': event['key'],
'name': event['key'],
'shmid': event['shmid'],
'perms': event['perms'],
'owner': event['owner'],
'status': event['status'],
'uuid': str(uuid.uuid4()),
'hash': 'hashvalue'
}
}
print ("Sending event: %s" % send_event)
send_event.update(rule)
# Now push it down the queue
message = json.dumps(send_event)
RippleConfig().queue.put(message)
logger.debug("Sent data to queue")
return None
def match_condition(self, event_type, rule):
"""
Match the event against a rule's conditions.
"""
logger.debug("Matching rule conditions")
rule_event_type = rule['trigger']['event']
if event_type == rule_event_type:
logger.debug("Matching rule conditions: type MATCHED")
# Hm, might be worth adding perms, owner, status?
return True
return False
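    # Illustrative sketch (an assumption, not taken from the RippleConfig rule
    # files): the smallest rule dict that match_condition would accept for a
    # newly created shared memory segment.
    #
    #   example_rule = {'trigger': {'event': 'Create'}}
    #   # self.match_condition('Create', example_rule) -> True
    #   # self.match_condition('Delete', example_rule) -> False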
def process_segments(self, segments):
"""
Process the segments and return which are new and which have been removed.
"""
previous = dict(self.segments)
new = []
removed = []
for shmid, val in segments.items():
if shmid not in previous.keys():
new.append(val)
# update it in the global dict
self.segments[shmid] = val
else:
# it already existed, so ignore it
del previous[shmid]
for shmid, val in previous.items():
removed.append(val)
del self.segments[shmid]
# now convert these into events
events = []
for e in new:
e['type'] = 'Create'
if 'status' not in e:
e['status'] = 'other'
events.append(e)
for e in removed:
if 'status' not in e:
e['status'] = 'other'
e['type'] = 'Delete'
events.append(e)
return events
def get_segments(self, start=False):
"""
Use the icps command to get and return a dictionary of
segments.
"""
cmd = ["ipcs", "-a"]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output, err = process.communicate()
output = output.decode("utf-8").split("\n")
keys = ['key', 'shmid', 'owner', 'perms', 'bytes', 'nattch',
'status']
segments = {}
for line in output:
# this should capture all keys
# note: it won't do queues vs mem vs sem etc.
if line[0:2] == '0x':
values = list(filter(None, line.split(" ")))
data = dict(zip(keys, values))
if start:
# print (data['shmid'])
self.segments[data['shmid']] = data
segments[data['shmid']] = data
return segments
def stop_monitoring(self):
"""
Terminate the monitor
"""
logger.debug("Terminating POSIX monitor.")
|
apache-2.0
| -8,401,639,659,101,003,000
| 31.695652
| 86
| 0.492781
| false
| 4.666667
| false
| false
| false
|
Crowdcomputer/CroCoAPI
|
crocoapi/settings.py
|
1
|
2668
|
"""
Django settings for CroCoAPI project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from settings_production import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
if DEBUG:
from settings_local import *
else:
from settings_production import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ui',
'api',
'rest_framework',
'rest_framework.authtoken',
'south',
'crispy_forms',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'crocoapi.urls'
WSGI_APPLICATION = 'crocoapi.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
# 'rest_framework.authentication.TokenAuthentication',
'api.authentication.TokenAppAuthentication',
),
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
# 'DEFAULT_MODEL_SERIALIZER_CLASS':
# 'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated',
'api.permissions.IsOwner'
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
CRISPY_TEMPLATE_PACK='bootstrap'
|
gpl-2.0
| 2,834,945,857,428,308,500
| 25.415842
| 75
| 0.718891
| false
| 3.68
| false
| false
| false
|
sbrodehl/HashCode
|
Final Round/best_solution_in_the_wuuuuuuurld.py
|
1
|
19988
|
from random import shuffle
from skimage.morphology import skeletonize, medial_axis
from tqdm import tqdm
from scipy import signal
import scipy.ndimage.filters as fi
import pickle
import glob
import bz2
import multiprocessing
from multiprocessing import Pool
from functools import partial
from IO import *
from Utilities import compute_solution_score, wireless_access, quasi_euclidean_dist, chessboard_dist
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def place_routers_on_skeleton(d, cmethod):
wireless = np.where(d["graph"] == Cell.Wireless, 1, 0)
# perform skeletonization
skeleton = skeletonize(wireless)
med_axis = medial_axis(wireless)
skel = skeleton
# skel = med_axis
# get all skeleton positions
pos = []
for i in range(skel.shape[0]):
for j in range(skel.shape[1]):
if skel[i][j]:
pos.append((i, j))
budget = d['budget']
shuffle(pos)
max_num_routers = min([int(d['budget'] / d['price_router']), len(pos)])
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
print(" skeleton: %d" % len(pos))
for i in tqdm(range(max_num_routers), desc="Placing Routers"):
new_router = pos[i]
a, b = new_router
# check if remaining budget is enough
d["graph"][a][b] = Cell.Router
d, ret, cost = _add_cabel(d, new_router, budget)
budget -= cost
if not ret:
break
return d
def place_routers_on_skeleton_iterative(d, cmethod):
budget = d['budget']
R = d['radius']
max_num_routers = int(d['budget'] / d['price_router'])
coverage = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.bool)
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
# perform skeletonization
# skeleton = skeletonize(coverage)
skeleton = medial_axis(coverage)
# get all skeleton positions
pos = np.argwhere(skeleton > 0).tolist()
# escape if no positions left
if not len(pos):
break
# get a random position
shuffle(pos)
a, b = pos[0]
# place router
d["graph"][a][b] = Cell.Router
d, ret, cost = _add_cabel(d, (a, b), budget)
if not ret:
print("No budget available!")
break
budget -= cost
# refresh wireless map by removing new coverage
m = wireless_access(a, b, R, d['graph']).astype(np.bool)
coverage[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~m
pbar.update()
pbar.close()
return d
def place_routers_randomized(d, cmethod):
max_num_routers = int(d['budget'] / d['price_router'])
wireless = np.where(d["graph"] == Cell.Wireless, 0, 1)
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
budget = d['budget']
R = d['radius']
if cmethod == 'mst':
        succ, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
for i in pbar:
# generate random position for router
indices = np.argwhere(wireless == 0).tolist()
        if len(indices) == 0:
            pbar.close()
            print("No more suitable positions left!")
            return d
        x, y = indices[np.random.randint(0, len(indices))]
# modify graph
if cmethod == 'bfs':
d["graph"][x][y] = Cell.Router
d, ret, cost = _add_cabel(d, (x, y), budget)
if ret:
budget -= cost
# refresh wireless map by removing new coverage
mask = wireless_access(x, y, R, d['graph'])
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] |= mask.astype(np.bool)
else:
# no more budget left
pbar.close()
print("No budget available!")
return d
elif cmethod == 'mst':
tmp = d["graph"][x][y]
d["graph"][x][y] = Cell.Router
            succ, cost, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
if succ and i < 10:
mask = wireless_access(x, y, R, d['graph'])
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] |= mask.astype(np.bool)
else:
# reverse last router
d["graph"][x][y] = tmp
d = _place_mst_paths(d, routers, idx, idy, dists)
pbar.close()
print("No budget available!")
return d
pbar.update(max_num_routers)
return d
def _parallel_helper(position, radius, graph, offset=(0, 0)):
a, b = position
ux_min, uy_min = offset
a, b = a + ux_min, b + uy_min
mask = wireless_access(a, b, radius, graph)
return a, b, np.sum(np.nan_to_num(mask)), mask
def _parallel_counting_helper(position, radius, graph, scoring, offset=(0, 0)):
a, b = position
ux_min, uy_min = offset
a, b = a + ux_min, b + uy_min
mask = wireless_access(a, b, radius, graph)
wx_min, wx_max = np.max([0, (a - radius)]), np.min([scoring.shape[0], (a + radius + 1)])
wy_min, wy_max = np.max([0, (b - radius)]), np.min([scoring.shape[1], (b + radius + 1)])
# get the submask which is valid
dx, lx = np.abs(wx_min - (a - radius)), wx_max - wx_min
dy, ly = np.abs(wy_min - (b - radius)), wy_max - wy_min
return a, b, np.sum(np.multiply(scoring[wx_min:wx_max, wy_min:wy_max], np.nan_to_num(mask[dx:dx + lx, dy:dy + ly])))
def place_routers_randomized_by_score(d, cmethod):
# some constants
max_num_routers = int(d['budget'] / d['price_router'])
budget = d['budget']
R = d['radius']
wireless = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.int8)
scoring = np.zeros(wireless.shape, dtype=np.float32) - 1
counting = np.zeros_like(scoring)
coverage = {}
print("Num of routers constrained by:")
print(" budget: %d" % max_num_routers)
fscore = d['name'] + ".scores"
fcov = d['name'] + ".coverage"
facc = d['name'] + ".counting"
compute_stuff = False
sample_files = glob.glob('output/' + facc)
if len(sample_files) and not compute_stuff:
print("Found accounting file.")
counting = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
sample_files = glob.glob('output/' + fscore)
if len(sample_files) and not compute_stuff:
print("Found scoring file.")
scoring = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
sample_files = glob.glob('output/' + fcov)
if len(sample_files) and not compute_stuff:
print("Found coverage file.")
coverage = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
if compute_stuff:
# compute initial scoring, which will be updated during placing
positions = np.argwhere(wireless > 0).tolist()
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s, m in pool.imap_unordered(partial(_parallel_helper, radius=R, graph=d['original']), positions):
scoring[a][b] = s
coverage[(a, b)] = m
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s in pool.imap_unordered(
partial(_parallel_counting_helper, radius=R, graph=wireless, scoring=scoring), positions):
counting[a][b] = s
print("Saving scoring file.")
# save scoring to disk
pickle.dump(scoring, bz2.BZ2File('output/' + fscore, 'w'), pickle.HIGHEST_PROTOCOL)
print("Saving coverage file.")
# save coverage to disk
pickle.dump(coverage, bz2.BZ2File('output/' + fcov, 'w'), pickle.HIGHEST_PROTOCOL)
print("Saving counting file.")
# save coverage to disk
pickle.dump(counting, bz2.BZ2File('output/' + facc, 'w'), pickle.HIGHEST_PROTOCOL)
routers = []
idx, idy, dists = [], [], []
if cmethod == 'mst':
placed, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
# choose routers by score and place them!
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
placement = None
max_score = scoring.max()
if max_score > 0:
possible_placements = np.argwhere(scoring == max_score).tolist()
score_count = {}
for pp in possible_placements:
score_count[(pp[0], pp[1])] = counting[pp[0]][pp[1]]
sorted_scores = sorted(score_count)
placement = next(iter(sorted_scores or []), None)
if placement is None:
print("No positions available!")
break
# update progress bar
pbar.update()
x, y = placement
cost = 0
placed = False
if cmethod == 'mst':
tmp = d["graph"][x][y]
d["graph"][x][y] = Cell.Router
placed, nbud, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
budget = d['budget'] - nbud
if not placed:
d["graph"][x][y] = tmp
routers = routers[:-1]
idx, idy, dists = idx[:-len(routers)], idy[:-len(routers)], dists[:-len(routers)]
else:
# bfs as default
# modify graph, add router and cables
d["graph"][x][y] = Cell.Router
d, placed, cost = _add_cabel(d, (x, y), budget)
# check if new path is not to expensive
if not placed:
print("No budget available!")
break
# update budget
budget -= cost
# prepare coverage and scoring for next round
# remove score for current router
wx_min, wx_max = np.max([0, (x - R)]), np.min([wireless.shape[0], (x + R + 1)])
wy_min, wy_max = np.max([0, (y - R)]), np.min([wireless.shape[1], (y + R + 1)])
# get the submask which is valid
dx, lx = np.abs(wx_min - (x - R)), wx_max - wx_min
dy, ly = np.abs(wy_min - (y - R)), wy_max - wy_min
# remove coverage from map
wireless[wx_min:wx_max, wy_min:wy_max] &= ~(coverage[(x, y)][dx:dx + lx, dy:dy + ly].astype(np.bool))
# nullify scores
scoring[wx_min:wx_max, wy_min:wy_max] = -1
ux_min, uy_min = np.max([0, (x - 2 * R)]), np.max([0, (y - 2 * R)])
ux_max, uy_max = np.min([wireless.shape[0], (x + 2 * R + 1)]), np.min([wireless.shape[1], (y + 2 * R + 1)])
# compute places to be updated
updating = wireless[ux_min:ux_max, uy_min:uy_max]
# get all position coordinates
positions = np.argwhere(updating).tolist()
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s, m in pool.imap_unordered(
partial(_parallel_helper, radius=R, graph=wireless, offset=(ux_min, uy_min)), positions):
scoring[a][b] = s
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s in pool.imap_unordered(
partial(_parallel_counting_helper, radius=R, graph=wireless, scoring=scoring,
offset=(ux_min, uy_min)), positions):
counting[a][b] = s
counting = np.multiply(counting, wireless)
# budget looks good, place all cables
if cmethod == 'mst':
d = _place_mst_paths(d, routers, idx, idy, dists)
pbar.close()
return d
def place_routers_by_convolution(d, cmethod):
max_num_routers = int(d['budget'] / d['price_router'])
# wireless = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.float64)
wireless = np.where(d["graph"] == Cell.Wireless, 1, -1).astype(np.float64)
walls = np.where(d['graph'] <= Cell.Wall, 0, 1).astype(np.float64)
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
budget = d['budget']
R = d['radius']
r21 = 2 * R + 1
stdev = 6.6
# kernel = np.ones((2*R+1, 2*R+1))
# kernel = (_gkern2(2 * R + 1, 2) * 1e2)
kernel = (np.outer(signal.gaussian(r21, stdev), signal.gaussian(r21, stdev))).astype(np.float32)
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
# convolve
mat = signal.fftconvolve(wireless, kernel, mode='same')
found = False
while not found:
# get the max of the conv matrix
mat_max = mat.max()
max_positions = np.argwhere(mat == mat_max).tolist()
selected_pos = max_positions[np.random.randint(0, len(max_positions))]
# check if we have suitable positions left
if mat_max == -np.inf:
pbar.close()
print("No more suitable positions left!")
return d
x, y = selected_pos
# max can be on a wall position... ignore it
if d['graph'][x][y] <= Cell.Wall:
# pbar.write('> Optimal position on wall cell...')
mat[x][y] = -np.inf
else:
found = True
# update progress bar
pbar.update()
# modify graph
d["graph"][x][y] = Cell.Router
d, ret, cost = _add_cabel(d, (x, y), budget)
# check if new path is not to expensive
if ret:
budget -= cost
# refresh wireless map by removing new coverage
mask = wireless_access(x, y, R, d['graph'])
# wireless[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~mask.astype(np.bool)
# wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] -= kernel
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] = -1.0
else:
# we've not enough budget
pbar.close()
print("No budget available!")
return d
pbar.close()
return d
def _mst(d, new_router, routers=None, idx=None, idy=None, dists=None):
    # use None defaults to avoid sharing mutable default lists between calls
    routers = [] if routers is None else routers
    idx = [] if idx is None else idx
    idy = [] if idy is None else idy
    dists = [] if dists is None else dists
    new_id = len(routers)
# calc new router dists
for i, a in enumerate(routers):
dist = chessboard_dist(a, new_router)
if dist > 0:
idx.append(i)
idy.append(new_id)
dists.append(dist)
# add new router
routers.append(new_router)
# create matrix
mat = csr_matrix((dists, (idx, idy)), shape=(len(routers), len(routers)))
# minimal spanning tree
Tmat = minimum_spanning_tree(mat)
# check costs
cost = np.sum(Tmat) * d['price_backbone'] + (len(routers) - 1) * d['price_router']
succ = cost <= d['original_budget']
# return
return succ, cost, routers, idx, idy, dists
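# Illustrative usage sketch (mirrors the call sites in the placement routines
# above): a candidate router is tentatively added, the minimum spanning tree
# over the backbone plus all routers is recomputed, and the candidate is kept
# only while the estimated cost stays within the original budget.
#
#   ok, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
#   ok, cost, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
#   if not ok:
#       pass  # roll back the tentative router and stop placing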
def find_chess_connection(a, b):
cables = []
dx, dy = np.abs(a[0] - b[0]) + 1, np.abs(a[1] - b[1]) + 1
xmin, ymin = np.min([a[0], b[0]]), np.min([a[1], b[1]])
path = np.zeros((dx, dy), dtype=np.bool)
path[a[0] - xmin][a[1] - ymin] = True
path[b[0] - xmin][b[1] - ymin] = True
r = [dx, dy]
amin = np.argmin(r)
flipped = False
if not path[0][0]:
path = np.flipud(path)
flipped = True
# set diagonal elements
for i in range(r[amin]):
path[i][i] = True
# set remaining straight elements
if amin == 0:
for i in range(np.abs(dx - dy)):
path[-1][r[amin] + i] = True
elif amin == 1:
for i in range(np.abs(dx - dy)):
path[r[amin] + i][-1] = True
if flipped:
path = np.flipud(path)
# select cables
for i, row in enumerate(path):
for j, col in enumerate(row):
if path[i][j]:
cables.append((i + xmin, j + ymin))
return cables
def find_connection(router_from, router_to):
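    """Return an L-shaped cable path: straight along the row axis from
    router_from, then straight along the column axis towards router_to."""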
cables = []
if router_from[0] < router_to[0]:
xr = range(router_from[0], router_to[0] + 1)
else:
xr = range(router_from[0], router_to[0] - 1, -1)
if router_from[1] < router_to[1]:
yr = range(router_from[1], router_to[1] + 1)
else:
yr = range(router_from[1], router_to[1] - 1, -1)
for x1 in xr:
cables.append((x1, router_from[1]))
for y1 in yr:
cables.append((router_to[0], y1))
return cables
def _place_mst_paths(d, routers, idx, idy, dists):
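    """Compute the minimum spanning tree over all placed routers and write its
    edges into the grid as cables, marking every router on the tree as connected."""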
# calc mst
mat = csr_matrix((dists, (idx, idy)), shape=(len(routers), len(routers)))
Tmat = minimum_spanning_tree(mat).toarray()
    # place cables
for i, r in enumerate(Tmat):
for j, c in enumerate(r):
if Tmat[i, j] > 0:
cables = find_chess_connection(routers[i], routers[j])
for cable in cables:
if cable == d['backbone']:
continue
if d['graph'][cable] == Cell.Router:
d['graph'][cable] = Cell.ConnectedRouter
else:
d['graph'][cable] = Cell.Cable
for router in routers:
if router == d['backbone']:
continue
d['graph'][router] = Cell.ConnectedRouter
return d
def _add_cabel(d, new_router, remaining_budget):
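    """Connect new_router to the existing backbone along the shortest BFS path;
    the cables are placed only if cable plus router cost fits the remaining
    budget. Returns (d, placed, cost)."""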
path = _bfs(d, new_router)
cost = len(path) * d['price_backbone'] + d['price_router']
if cost <= remaining_budget:
for c in path:
if d['graph'][c] == Cell.Router:
d['graph'][c] = Cell.ConnectedRouter
else:
d['graph'][c] = Cell.Cable
return d, True, cost
return d, False, cost
def _bfs(d, start):
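    """8-neighbour breadth-first search from start to the nearest connected
    router or the backbone; returns the path cells including start but excluding
    the reached goal cell, or None if no connected cell is reachable."""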
dx = [0, -1, 1]
dy = [0, -1, 1]
    visited = np.zeros((d['height'], d['width']), dtype=bool)
parent = (np.zeros((d['height'], d['width']), dtype=np.int32) - 1).tolist()
queue = deque()
queue.append(start)
visited[start[0]][start[1]] = True
while queue:
cur = queue.popleft()
# check goal condition
if d['graph'][cur] >= Cell.ConnectedRouter or cur == d['backbone']:
# generate path from parent array
path = []
a = cur
while a != start:
path.append(a)
a = parent[a[0]][a[1]]
path.append(a)
return path[1:]
# add children
# check neighbors
for ddx in dx:
for ddy in dy:
if ddx == 0 and ddy == 0:
continue
child_x, child_y = cur[0] + ddx, cur[1] + ddy
# only if still in the grid
if 0 <= child_x < d['height'] and 0 <= child_y < d['width']:
child = (child_x, child_y)
                    # every cell is treated as "walkable"
if not visited[child[0]][child[1]]:
queue.append(child)
visited[child[0]][child[1]] = True
parent[child[0]][child[1]] = cur
return None
def _gkern2(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
# create nxn zeros
inp = np.zeros((kernlen, kernlen))
# set element at the middle to one, a dirac delta
inp[kernlen // 2, kernlen // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
return fi.gaussian_filter(inp, nsig)
if __name__ == '__main__':
D = read_dataset('input/example.in')
budget = D['budget']
routers = [(3, 6), (3, 9)]
for r in routers:
# set routers
D['graph'][r[0], r[1]] = Cell.Router
D, placed, cost = _add_cabel(D, r, budget)
if not placed:
print("No budget available!")
break
budget -= cost
score = compute_solution_score(D)
print(score)
write_solution('output/example.out', D)
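# A minimal, hypothetical end-to-end run of the convolution-based placer; it
# assumes read_dataset, compute_solution_score and write_solution are the same
# helpers used in the demo above and that 'mst' is a valid cable method:
#
#   D = read_dataset('input/example.in')
#   D = place_routers_by_convolution(D, 'mst')
#   print(compute_solution_score(D))
#   write_solution('output/example.out', D)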
|
apache-2.0
| 1,336,552,385,937,426,200
| 32.092715
| 120
| 0.537422
| false
| 3.372364
| false
| false
| false
|
dcondrey/scrapy-spiders
|
newenglandfilm/newengland/spiders/newenglandfilm.py
|
1
|
1830
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.crawler import Crawler
from scrapy.http import Request
from scrapy import signals
from scrapy.utils.project import get_project_settings
from my_settings import name_file, test_mode, difference_days
from twisted.internet import reactor
from datetime import datetime, timedelta
import re
print "Run spider NewenglandFilm"
file_output = open(name_file, 'a')
email_current_session = []
email_in_file = open(name_file, 'r').readlines()
if test_mode:
current_date = (datetime.today() - timedelta(days=difference_days)).strftime('%m/%d/%Y')
else:
current_date = datetime.today().strftime('%m/%d/%Y')
class NewenglandFilm(Spider):
name = 'newenglandfilm'
allowed_domains = ["newenglandfilm.com"]
start_urls = ["http://newenglandfilm.com/jobs.htm"]
def parse(self, response):
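        """Scan the first 30 job listings and write out e-mail addresses posted
        on current_date that are not already in the output file or in this
        scraping session."""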
sel = Selector(response)
for num_div in xrange(1, 31):
date = sel.xpath('//*[@id="mainContent"]/div[{0}]/span/text()'.format(str(num_div))).re('(\d{1,2}\/\d{1,2}\/\d{4})')[0]
email = sel.xpath('//*[@id="mainContent"]/div[{0}]/div/text()'.format(str(num_div))).re('(\w+@[a-zA-Z0-9_]+?\.[a-zA-Z]{2,6})')
if current_date == date:
for address in email:
if address + "\n" not in email_in_file and address not in email_current_session:
file_output.write(address + "\n")
email_current_session.append(address)
print "Spider: NewenglandFilm. Email {0} added to file".format(address)
else:
print "Spider: NewenglandFilm. Email {0} already in the file".format(address)
|
mit
| 808,298,491,493,015,200
| 37.826087
| 138
| 0.60929
| false
| 3.376384
| false
| false
| false
|
astrobin/astrobin
|
astrobin_apps_payments/api/views/pricing_view.py
|
1
|
1570
|
import logging
from braces.views import JSONResponseMixin
from django.conf import settings
from django.http import HttpResponseBadRequest
from django.views import View
from rest_framework.authtoken.models import Token
from astrobin_apps_payments.services.pricing_service import PricingService
log = logging.getLogger('apps')
class PricingView(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
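        """Return the full price, discount and effective price for a product and
        currency; an optional 'Authorization: Token <key>' header identifies the
        user so user-specific discounts can be applied."""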
product = kwargs.pop('product', None) # type: str
currency = kwargs.pop('currency', None) # type: str
if product is None or product.lower() not in ('lite', 'premium', 'ultimate'):
log.error('pricing_view: invalid product: %s' % product)
return HttpResponseBadRequest("Invalid product")
if currency is None or currency.upper() not in settings.SUPPORTED_CURRENCIES:
log.error('pricing_view: invalid currency: %s' % currency)
return HttpResponseBadRequest("Unsupported currency")
user = None
if 'HTTP_AUTHORIZATION' in request.META:
token_in_header = request.META['HTTP_AUTHORIZATION'].replace('Token ', '')
token = Token.objects.get(key=token_in_header)
user = token.user
return self.render_json_response({
'fullPrice': PricingService.get_full_price(product.lower(), currency.upper()),
'discount': PricingService.get_discount_amount(product.lower(), currency.upper(), user=user),
'price': PricingService.get_price(product.lower(), currency.upper(), user=user)
})
|
agpl-3.0
| -8,189,497,786,092,724,000
| 41.432432
| 105
| 0.67707
| false
| 4.077922
| false
| false
| false
|
amitjamadagni/sympy
|
sympy/matrices/tests/test_sparse.py
|
2
|
16649
|
from sympy import S, Symbol, I, Rational, PurePoly
from sympy.matrices import Matrix, SparseMatrix, eye, zeros, ShapeError
from sympy.utilities.pytest import raises, XFAIL
def test_sparse_matrix():
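    """Exercise SparseMatrix creation, element assignment, arithmetic,
    decompositions, slicing and eigen routines against their dense Matrix
    equivalents."""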
def sparse_eye(n):
return SparseMatrix.eye(n)
def sparse_zeros(n):
return SparseMatrix.zeros(n)
# creation args
raises(TypeError, lambda: SparseMatrix(1, 2))
a = SparseMatrix((
(1, 0),
(0, 1)
))
assert SparseMatrix(a) == a
# test element assignment
a = SparseMatrix((
(1, 0),
(0, 1)
))
a[3] = 4
assert a[1, 1] == 4
a[3] = 1
a[0, 0] = 2
assert a == SparseMatrix((
(2, 0),
(0, 1)
))
a[1, 0] = 5
assert a == SparseMatrix((
(2, 0),
(5, 1)
))
a[1, 1] = 0
assert a == SparseMatrix((
(2, 0),
(5, 0)
))
assert a._smat == {(0, 0): 2, (1, 0): 5}
# test_multiplication
a = SparseMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = SparseMatrix((
(1, 2),
(3, 0),
))
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
x = Symbol("x")
c = b * Symbol("x")
assert isinstance(c, SparseMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c = 5 * b
assert isinstance(c, SparseMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
#test_power
A = SparseMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == [6140, 8097, 10796, 14237]
A = SparseMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]
# test_creation
x = Symbol("x")
a = SparseMatrix([[x, 0], [0, 0]])
m = a
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
b = SparseMatrix(2, 2, [x, 0, 0, 0])
m = b
assert m.cols == m.rows
assert m.cols == 2
assert m[:] == [x, 0, 0, 0]
assert a == b
S = sparse_eye(3)
S.row_del(1)
assert S == SparseMatrix([
[1, 0, 0],
[0, 0, 1]])
S = sparse_eye(3)
S.col_del(1)
assert S == SparseMatrix([
[1, 0],
[0, 0],
[0, 1]])
S = SparseMatrix.eye(3)
S[2, 1] = 2
S.col_swap(1, 0)
assert S == SparseMatrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
a = SparseMatrix(1, 2, [1, 2])
b = a.copy()
c = a.copy()
assert a[0] == 1
a.row_del(0)
assert a == SparseMatrix(0, 2, [])
b.col_del(1)
assert b == SparseMatrix(1, 1, [1])
# test_determinant
x, y = Symbol('x'), Symbol('y')
assert SparseMatrix(1, 1, [0]).det() == 0
assert SparseMatrix([[1]]).det() == 1
assert SparseMatrix(((-3, 2), (8, -5))).det() == -1
assert SparseMatrix(((x, 1), (y, 2*y))).det() == 2*x*y - y
assert SparseMatrix(( (1, 1, 1),
(1, 2, 3),
(1, 3, 6) )).det() == 1
assert SparseMatrix(( ( 3, -2, 0, 5),
(-2, 1, -2, 2),
( 0, -2, 5, 0),
( 5, 0, 3, 4) )).det() == -289
assert SparseMatrix(( ( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16) )).det() == 0
assert SparseMatrix(( (3, 2, 0, 0, 0),
(0, 3, 2, 0, 0),
(0, 0, 3, 2, 0),
(0, 0, 0, 3, 2),
(2, 0, 0, 0, 3) )).det() == 275
assert SparseMatrix(( (1, 0, 1, 2, 12),
(2, 0, 1, 1, 4),
(2, 1, 1, -1, 3),
(3, 2, -1, 1, 8),
(1, 1, 1, 0, 6) )).det() == -55
assert SparseMatrix(( (-5, 2, 3, 4, 5),
( 1, -4, 3, 4, 5),
( 1, 2, -3, 4, 5),
( 1, 2, 3, -2, 5),
( 1, 2, 3, 4, -1) )).det() == 11664
assert SparseMatrix(( ( 2, 7, -1, 3, 2),
( 0, 0, 1, 0, 1),
(-2, 0, 7, 0, 2),
(-3, -2, 4, 5, 3),
( 1, 0, 0, 0, 1) )).det() == 123
# test_submatrix
m0 = sparse_eye(4)
assert m0[:3, :3] == sparse_eye(3)
assert m0[2:4, 0:2] == sparse_zeros(2)
m1 = SparseMatrix(3, 3, lambda i, j: i + j)
assert m1[0, :] == SparseMatrix(1, 3, (0, 1, 2))
assert m1[1:3, 1] == SparseMatrix(2, 1, (2, 3))
m2 = SparseMatrix(
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
assert m2[:, -1] == SparseMatrix(4, 1, [3, 7, 11, 15])
assert m2[-2:, :] == SparseMatrix([[8, 9, 10, 11], [12, 13, 14, 15]])
assert SparseMatrix([[1, 2], [3, 4]]).submatrix([1, 1]) == Matrix([[4]])
# test_submatrix_assignment
m = sparse_zeros(4)
m[2:4, 2:4] = sparse_eye(2)
assert m == SparseMatrix([(0, 0, 0, 0),
(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1)])
assert len(m._smat) == 2
m[:2, :2] = sparse_eye(2)
assert m == sparse_eye(4)
m[:, 0] = SparseMatrix(4, 1, (1, 2, 3, 4))
assert m == SparseMatrix([(1, 0, 0, 0),
(2, 1, 0, 0),
(3, 0, 1, 0),
(4, 0, 0, 1)])
m[:, :] = sparse_zeros(4)
assert m == sparse_zeros(4)
m[:, :] = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16))
assert m == SparseMatrix((( 1, 2, 3, 4),
( 5, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
m[:2, 0] = [0, 0]
assert m == SparseMatrix((( 0, 2, 3, 4),
( 0, 6, 7, 8),
( 9, 10, 11, 12),
(13, 14, 15, 16)))
# test_reshape
m0 = sparse_eye(3)
assert m0.reshape(1, 9) == SparseMatrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = SparseMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(4, 3) == \
SparseMatrix([(0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)])
assert m1.reshape(2, 6) == \
SparseMatrix([(0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)])
# test_applyfunc
m0 = sparse_eye(3)
assert m0.applyfunc(lambda x: 2*x) == sparse_eye(3)*2
assert m0.applyfunc(lambda x: 0 ) == sparse_zeros(3)
# test_LUdecomp
testmat = SparseMatrix([[ 0, 2, 5, 3],
[ 3, 3, 7, 4],
[ 8, 4, 0, 2],
[-2, 6, 3, 4]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
testmat = SparseMatrix([[ 6, -2, 7, 4],
[ 0, 3, 6, 7],
[ 1, -2, 7, 4],
[-9, 2, 6, 3]])
L, U, p = testmat.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - testmat == sparse_zeros(4)
x, y, z = Symbol('x'), Symbol('y'), Symbol('z')
M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))
L, U, p = M.LUdecomposition()
assert L.is_lower
assert U.is_upper
assert (L*U).permuteBkwd(p) - M == sparse_zeros(3)
# test_LUsolve
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[8, 3, 6]])
x = SparseMatrix(3, 1, [3, 7, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
A = SparseMatrix([[0, -1, 2],
[5, 10, 7],
[8, 3, 4]])
x = SparseMatrix(3, 1, [-1, 2, 5])
b = A*x
soln = A.LUsolve(b)
assert soln == x
# test_inverse
A = sparse_eye(4)
assert A.inv() == sparse_eye(4)
assert A.inv(method="CH") == sparse_eye(4)
assert A.inv(method="LDL") == sparse_eye(4)
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[7, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
A = SparseMatrix([[2, 3, 5],
[3, 6, 2],
[5, 2, 6]])
Ainv = SparseMatrix(Matrix(A).inv())
assert A*Ainv == sparse_eye(3)
assert A.inv(method="CH") == Ainv
assert A.inv(method="LDL") == Ainv
# test_cross
v1 = Matrix(1, 3, [1, 2, 3])
v2 = Matrix(1, 3, [3, 4, 5])
assert v1.cross(v2) == Matrix(1, 3, [-2, 4, -2])
assert v1.norm(2)**2 == 14
# conjugate
a = SparseMatrix(((1, 2 + I), (3, 4)))
assert a.C == SparseMatrix([
[1, 2 - I],
[3, 4]
])
# mul
assert a*Matrix(2, 2, [1, 0, 0, 1]) == a
assert a + Matrix(2, 2, [1, 1, 1, 1]) == SparseMatrix([
[2, 3 + I],
[4, 5]
])
# col join
assert a.col_join(sparse_eye(2)) == SparseMatrix([
[1, 2 + I],
[3, 4],
[1, 0],
[0, 1]
])
# symmetric
assert not a.is_symmetric(simplify=False)
# test_cofactor
assert sparse_eye(3) == sparse_eye(3).cofactorMatrix()
test = SparseMatrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])
assert test.cofactorMatrix() == \
SparseMatrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])
test = SparseMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert test.cofactorMatrix() == \
SparseMatrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])
# test_jacobian
x = Symbol('x')
y = Symbol('y')
L = SparseMatrix(1, 2, [x**2*y, 2*y**2 + x*y])
syms = [x, y]
assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])
L = SparseMatrix(1, 2, [x, x**2*y**3])
assert L.jacobian(syms) == SparseMatrix([[1, 0], [2*x*y**3, x**2*3*y**2]])
# test_QR
A = Matrix([[1, 2], [2, 3]])
Q, S = A.QRdecomposition()
R = Rational
assert Q == Matrix([
[ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],
[2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])
assert S == Matrix([
[5**R(1, 2), 8*5**R(-1, 2)],
[ 0, (R(1)/5)**R(1, 2)]])
assert Q*S == A
assert Q.T * Q == sparse_eye(2)
R = Rational
# test nullspace
# first test reduced row-ech form
M = SparseMatrix([[5, 7, 2, 1],
[1, 6, 2, -1]])
out, tmp = M.rref()
assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],
[0, 1, R(8)/23, R(-6)/23]])
M = SparseMatrix([[ 1, 3, 0, 2, 6, 3, 1],
[-2, -6, 0, -2, -8, 3, 1],
[ 3, 9, 0, 0, 6, 6, 2],
[-1, -3, 0, 1, 0, 9, 3]])
out, tmp = M.rref()
assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],
[0, 0, 0, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 1, R(1)/3],
[0, 0, 0, 0, 0, 0, 0]])
# now check the vectors
basis = M.nullspace()
assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])
assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])
assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])
assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])
# test eigen
x = Symbol('x')
y = Symbol('y')
sparse_eye3 = sparse_eye(3)
assert sparse_eye3.charpoly(x) == PurePoly(((x - 1)**3))
assert sparse_eye3.charpoly(y) == PurePoly(((y - 1)**3))
# test values
M = Matrix([( 0, 1, -1),
( 1, 1, 0),
(-1, 0, 1)])
vals = M.eigenvals()
assert sorted(vals.keys()) == [-1, 1, 2]
R = Rational
M = Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 3, [
Matrix([1, 0, 0]),
Matrix([0, 1, 0]),
Matrix([0, 0, 1])])]
M = Matrix([[5, 0, 2],
[3, 2, 0],
[0, 0, 1]])
assert M.eigenvects() == [(1, 1, [Matrix([R(-1)/2, R(3)/2, 1])]),
(2, 1, [Matrix([0, 1, 0])]),
(5, 1, [Matrix([1, 1, 0])])]
assert M.zeros(3, 5) == SparseMatrix(3, 5, {})
def test_transpose():
assert SparseMatrix(((1, 2), (3, 4))).transpose() == \
SparseMatrix(((1, 3), (2, 4)))
def test_trace():
assert SparseMatrix(((1, 2), (3, 4))).trace() == 5
assert SparseMatrix(((0, 0), (0, 4))).trace() == 4
def test_CL_RL():
assert SparseMatrix(((1, 2), (3, 4))).row_list() == \
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
assert SparseMatrix(((1, 2), (3, 4))).col_list() == \
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
def test_add():
assert SparseMatrix(((1, 0), (0, 1))) + SparseMatrix(((0, 1), (1, 0))) == \
SparseMatrix(((1, 1), (1, 1)))
a = SparseMatrix(100, 100, lambda i, j: int(j != 0 and i % j == 0))
b = SparseMatrix(100, 100, lambda i, j: int(i != 0 and j % i == 0))
assert (len(a._smat) + len(b._smat) - len((a + b)._smat) > 0)
def test_errors():
raises(ValueError, lambda: SparseMatrix(1.4, 2, lambda i, j: 0))
raises(TypeError, lambda: SparseMatrix([1, 2, 3], [1, 2]))
raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[(1, 2, 3)])
raises(IndexError, lambda: SparseMatrix([[1, 2], [3, 4]])[5])
raises(ValueError, lambda: SparseMatrix([[1, 2], [3, 4]])[1, 2, 3])
raises(TypeError,
lambda: SparseMatrix([[1, 2], [3, 4]]).copyin_list([0, 1], set([])))
raises(
IndexError, lambda: SparseMatrix([[1, 2], [3, 4]]).submatrix((1, 2)))
raises(TypeError, lambda: SparseMatrix([1, 2, 3]).cross(1))
raises(IndexError, lambda: SparseMatrix(1, 2, [1, 2])[3])
raises(ShapeError,
lambda: SparseMatrix(1, 2, [1, 2]) + SparseMatrix(2, 1, [2, 1]))
def test_len():
assert not SparseMatrix()
assert SparseMatrix() == SparseMatrix([])
@XFAIL
def test_len_different_shapes():
assert Matrix() == Matrix([[]])
assert SparseMatrix() == SparseMatrix([[]])
def test_sparse_zeros_sparse_eye():
assert SparseMatrix.eye(3) == eye(3, cls=SparseMatrix)
assert len(SparseMatrix.eye(3)._smat) == 3
assert SparseMatrix.zeros(3) == zeros(3, cls=SparseMatrix)
assert len(SparseMatrix.zeros(3)._smat) == 0
def test_copyin():
s = SparseMatrix(3, 3, {})
s[1, 0] = 1
assert s[:, 0] == SparseMatrix(Matrix([0, 1, 0]))
assert s[3] == 1
assert s[3: 4] == [1]
s[1, 1] = 42
assert s[1, 1] == 42
assert s[1, 1:] == SparseMatrix([[42, 0]])
s[1, 1:] = Matrix([[5, 6]])
assert s[1, :] == SparseMatrix([[1, 5, 6]])
s[1, 1:] = [[42, 43]]
assert s[1, :] == SparseMatrix([[1, 42, 43]])
s[0, 0] = 17
assert s[:, :1] == SparseMatrix([17, 1, 0])
s[0, 0] = [1, 1, 1]
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = Matrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
s[0, 0] = SparseMatrix([1, 1, 1])
assert s[:, 0] == SparseMatrix([1, 1, 1])
def test_sparse_solve():
from sympy.matrices import SparseMatrix
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
assert A.cholesky() == Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
assert A.cholesky() * A.cholesky().T == Matrix([
[25, 15, -5],
[15, 18, 0],
[-5, 0, 11]])
A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
L, D = A.LDLdecomposition()
assert 15*L == Matrix([
[15, 0, 0],
[ 9, 15, 0],
[-3, 5, 15]])
assert D == Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
assert L * D * L.T == A
A = SparseMatrix(((3, 0, 2), (0, 0, 1), (1, 2, 0)))
assert A.inv() * A == SparseMatrix(eye(3))
A = SparseMatrix([
[ 2, -1, 0],
[-1, 2, -1],
[ 0, 0, 2]])
ans = SparseMatrix([
[S(2)/3, S(1)/3, S(1)/6],
[S(1)/3, S(2)/3, S(1)/3],
[ 0, 0, S(1)/2]])
assert A.inv(method='CH') == ans
assert A.inv(method='LDL') == ans
assert A * ans == SparseMatrix(eye(3))
s = A.solve(A[:, 0], 'LDL')
assert A*s == A[:, 0]
s = A.solve(A[:, 0], 'CH')
assert A*s == A[:, 0]
A = A.col_join(A)
s = A.solve_least_squares(A[:, 0], 'CH')
assert A*s == A[:, 0]
s = A.solve_least_squares(A[:, 0], 'LDL')
assert A*s == A[:, 0]
|
bsd-3-clause
| -4,237,973,322,416,478,000
| 28.998198
| 79
| 0.417923
| false
| 2.787843
| true
| false
| false
|
TshepangRas/tshilo-dikotla
|
td_maternal/forms/antenatal_enrollment_form.py
|
1
|
2783
|
from dateutil.relativedelta import relativedelta
from django import forms
from edc_constants.constants import YES
from td_maternal.models.enrollment_helper import EnrollmentHelper
from ..models import AntenatalEnrollment, MaternalEligibility
from .base_enrollment_form import BaseEnrollmentForm
class AntenatalEnrollmentForm(BaseEnrollmentForm):
def clean(self):
cleaned_data = super(AntenatalEnrollmentForm, self).clean()
# registered_subject = cleaned_data.get('registered_subject')
# if not registered_subject:
# raise forms.ValidationError('Expected a registered subject. Got None.')
# if not self.instance.id:
# registered_subject = cleaned_data.get('registered_subject')
# try:
# PostnatalEnrollment.objects.get(registered_subject=registered_subject)
# raise forms.ValidationError(
# "Antenatal enrollment is NOT REQUIRED. Postnatal Enrollment already completed")
# except PostnatalEnrollment.DoesNotExist:
# pass
# self.fill_postnatal_enrollment_if_recently_delivered()
# self.raise_if_rapid_test_required()
self.validate_last_period_date(cleaned_data.get('report_datetime'), cleaned_data.get('last_period_date'))
enrollment_helper = EnrollmentHelper(instance_antenatal=self._meta.model(**cleaned_data),
exception_cls=forms.ValidationError)
enrollment_helper.raise_validation_error_for_rapidtest()
return cleaned_data
def validate_last_period_date(self, report_datetime, last_period_date):
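        """Reject a last period date (LMP) that falls within 4 weeks of the
        report datetime."""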
if last_period_date and (last_period_date >= report_datetime.date() - relativedelta(weeks=4)):
            raise forms.ValidationError('LMP cannot be within 4 weeks of report datetime. '
'Got LMP as {} and report datetime as {}'.format(last_period_date,
report_datetime))
def clean_rapid_test_date(self):
rapid_test_date = self.cleaned_data['rapid_test_date']
registered_subject = self.cleaned_data['registered_subject']
if rapid_test_date:
try:
initial = AntenatalEnrollment.objects.get(
registered_subject=registered_subject)
if initial:
if rapid_test_date != initial.rapid_test_date:
raise forms.ValidationError('The rapid test result cannot be changed')
except AntenatalEnrollment.DoesNotExist:
pass
return rapid_test_date
class Meta:
model = AntenatalEnrollment
fields = '__all__'
|
gpl-2.0
| -5,039,070,942,179,587,000
| 46.169492
| 113
| 0.625943
| false
| 4.48871
| true
| false
| false
|
damoxc/vsmtpd
|
vsmtpd/daemon.py
|
1
|
7764
|
#
# vsmtpd/daemon.py
#
# Copyright (C) 2011 Damien Churchill <damoxc@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
import os
import sys
import gevent
import signal
import logging
import vsmtpd.logging_setup
from gevent import socket
from gevent.pool import Pool
from gevent.server import StreamServer
from optparse import OptionParser
from vsmtpd.config import load_config
from vsmtpd.config import ConfigWrapper
from vsmtpd.connection import Connection
from vsmtpd.hooks import HookManager
from vsmtpd.plugins.manager import PluginManager
from vsmtpd.util import set_cmdline
log = logging.getLogger(__name__)
vsmtpd = None
class Vsmtpd(object):
def __init__(self, options, args):
self.options = options
self.args = args
self.pool = None
self.workers = []
# Load the configuration for the server
self.load_config()
        # If we have a positive connection limit, create a Pool with that limit
connection_limit = self.config.getint('connection_limit')
if connection_limit > 0:
self.pool = Pool(connection_limit)
log.info('Limiting connections to %d', connection_limit)
# Create the hook manager
self.hook_manager = HookManager()
# Create the plugin manager
plugin_path = self.config.get('plugin_path').split(':')
self.plugin_manager = PluginManager(plugin_path)
def fire(self, hook_name, *args, **kwargs):
return self.hook_manager.dispatch_hook(hook_name, *args, **kwargs)
def handle(self, socket, address):
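        """Per-connection entry point used by the gevent StreamServer: wraps the
        client socket in a Connection and runs the pre/post connection hooks
        around it."""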
connection = Connection(self, socket, address)
connection.run_hooks('pre_connection', connection)
connection.accept()
connection.run_hooks('post_connection', connection)
def load_config(self):
self._config = load_config(self.options.config or 'vsmtpd.cfg', {
'vsmtpd': {
'port': 25,
'interface': None,
'backlog': 50,
'workers': 0,
'size_limit': 0,
'helo_host': None,
'connection_limit': 100,
'spool_dir': '/var/spool/vsmtpd',
'keyfile': None,
'certfile': None,
'cert_reqs': None,
# FIXME: Provide a default secure (SSLV3/TLSV1) cipher setup
'ssl_version': None,
'ca_certs': None,
'suppress_ragged_eofs': None,
'do_handshake_on_connect': None,
# FIXME: Provide a default secure (SSLV3/TLSV1) cipher setup
'ciphers': None,
'plugin_path': '/usr/share/vsmtpd/plugins'
}
})
self.config = ConfigWrapper(self._config, 'vsmtpd')
def load_plugins(self):
log.info('Loading plugins...')
# Load the plugins
for section in self._config.sections():
if not section.startswith('plugin:'):
continue
plugin_name = section.split(':', 1)[1]
try:
plugin_cls = self.plugin_manager.load(plugin_name)
except Exception as e:
log.fatal("Failed to load plugin '%s'", plugin_name)
log.exception(e)
exit(1)
try:
if self._config.options(section):
plugin = plugin_cls(ConfigWrapper(self._config, section))
else:
plugin = plugin_cls()
plugin.plugin_name = plugin_name
except Exception as e:
log.fatal("Failed to initialise plugin '%s'", plugin_name)
log.exception(e)
exit(1)
self.hook_manager.register_object(plugin)
def reload(self):
"""
Reload the configuration.
"""
def start(self):
"""
Starts the vsmtpd server in either master or worker mode.
"""
# Install the signal handlers
signal.signal(signal.SIGTERM, self.stop)
signal.signal(signal.SIGHUP, self.reload)
signal.signal(signal.SIGINT, self.stop)
workers = self.config.getint('workers')
backlog = self.config.getint('backlog')
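        # NOTE: the listen address and port are hardcoded below; the configured
        # 'interface' and 'port' options are not consulted here.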
addr = ('0.0.0.0', 2500)
if backlog < 1:
backlog = 50
log.info('Starting server on %s port %d', *addr)
if workers <= 0:
set_cmdline('vsmtpd: master')
self._start(addr, backlog)
# Open the socket for master/worker operation.
self.sock = socket.socket()
self.sock.bind(addr)
self.sock.listen(backlog)
self.sock.setblocking(0)
# Spawn the worker servers
for i in xrange(0, workers):
self._start_slave()
# Set the process title
set_cmdline('vsmtpd: master')
# Wait for the children to die
try:
os.waitpid(-1, 0)
except OSError:
pass
def _start(self, listener, backlog=None):
"""
Starts the vsmtpd server.
"""
self.server = StreamServer(listener, self.handle, backlog=backlog,
spawn=self.pool)
self.server.serve_forever()
def _start_slave(self):
"""
Starts a new slave worker process.
"""
pid = os.fork()
if pid == 0:
# Set up the command line and logger id
set_cmdline('vsmtpd: worker')
log.connection_id = 'worker'
# Call event_reinit()
gevent.reinit()
# Start vsmtpd
self._start(self.sock)
else:
log.info('Worker spawned PID %d', pid)
self.workers.append(pid)
def stop(self, *args):
"""
Shuts down the vsmtpd server and any workers running.
"""
# Shut down the server or the socket, depending on which is running
if self.workers:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
for pid in self.workers:
os.kill(pid, signal.SIGTERM)
else:
self.server.stop()
# Finally exit successfully
sys.exit()
def main():
global vsmtpd
parser = OptionParser()
parser.add_option('-c', '--config', dest='config', action='store',
default=None, help='the configuration file to use')
parser.add_option('-l', '--listen', dest='listen', action='append',
help='listen on this address')
parser.add_option('-p', '--port', dest='port', type='int', default=None,
help='set the default port to listen on')
(options, args) = parser.parse_args()
# Configure logging
logging.basicConfig(
level=logging.DEBUG,
format = '%(asctime)s %(levelname)-6s [%(name)-20s:%(lineno)-3s] [%(conn_id)-7s] %(message)s',
datefmt = '%a %d %b %Y %H:%M:%S'
)
log.connection_id = 'master'
vsmtpd = Vsmtpd(options, args)
vsmtpd.load_plugins()
try:
vsmtpd.start()
except KeyboardInterrupt:
vsmtpd.stop()
|
gpl-3.0
| -6,894,904,686,943,122,000
| 30.180723
| 102
| 0.577409
| false
| 4.069182
| true
| false
| false
|
zentralopensource/zentral
|
zentral/contrib/osquery/serializers.py
|
1
|
5197
|
from rest_framework import serializers
from zentral.contrib.inventory.models import EnrollmentSecret
from zentral.contrib.inventory.serializers import EnrollmentSecretSerializer
from .models import Configuration, Enrollment, Pack, Platform
class ConfigurationSerializer(serializers.ModelSerializer):
class Meta:
model = Configuration
fields = ("id", "name", "description",
"inventory", "inventory_apps", "inventory_interval",
"options",
"created_at", "updated_at")
class EnrollmentSerializer(serializers.ModelSerializer):
secret = EnrollmentSecretSerializer(many=False)
enrolled_machines_count = serializers.SerializerMethodField()
class Meta:
model = Enrollment
# TODO: distributor, maybe with a link ?
fields = ("id", "configuration", "enrolled_machines_count", "osquery_release", "secret", "version")
def get_enrolled_machines_count(self, obj):
return obj.enrolledmachine_set.count()
def create(self, validated_data):
secret_data = validated_data.pop('secret')
secret = EnrollmentSecret.objects.create(**secret_data)
enrollment = Enrollment.objects.create(secret=secret, **validated_data)
return enrollment
def update(self, instance, validated_data):
secret_serializer = self.fields["secret"]
secret_data = validated_data.pop('secret')
secret_serializer.update(instance.secret, secret_data)
return super().update(instance, validated_data)
# Standard Osquery packs
class OsqueryPlatformField(serializers.ListField):
def to_internal_value(self, data):
if data:
platforms = set(data.lower().split(","))
if platforms:
unknown_platforms = platforms - Platform.accepted_platforms()
if unknown_platforms:
raise serializers.ValidationError(
'Unknown platforms: {}'.format(", ".join(sorted(unknown_platforms)))
)
return sorted(platforms)
return []
class OsqueryQuerySerializer(serializers.Serializer):
query = serializers.CharField(allow_blank=False)
interval = serializers.IntegerField(min_value=1, max_value=604800)
removed = serializers.BooleanField(required=False)
snapshot = serializers.BooleanField(required=False)
platform = OsqueryPlatformField(required=False)
version = serializers.RegexField(r"^[0-9]{1,4}\.[0-9]{1,4}\.[0-9]{1,4}$", required=False)
shard = serializers.IntegerField(min_value=1, max_value=100, required=False)
denylist = serializers.BooleanField(default=True, required=False)
description = serializers.CharField(allow_blank=True, required=False)
value = serializers.CharField(allow_blank=False, required=False)
def validate(self, data):
snapshot = data.get("snapshot", False)
if snapshot and data.get("removed"):
raise serializers.ValidationError('{"action": "removed"} results are not available in "snapshot" mode')
return data
class OsqueryPackSerializer(serializers.Serializer):
name = serializers.CharField(max_length=256, required=False)
description = serializers.CharField(required=False)
discovery = serializers.ListField(child=serializers.CharField(allow_blank=False), allow_empty=True, required=False)
platform = OsqueryPlatformField(required=False)
version = serializers.RegexField(r"^[0-9]{1,4}\.[0-9]{1,4}\.[0-9]{1,4}$", required=False)
shard = serializers.IntegerField(min_value=1, max_value=100, required=False)
queries = serializers.DictField(child=OsqueryQuerySerializer(), allow_empty=False)
def get_pack_defaults(self, slug):
return {
"name": self.data.get("name", slug),
"description": self.data.get("description", ""),
"discovery_queries": self.data.get("discovery", []),
"shard": self.data.get("shard", None)
}
def iter_query_defaults(self, pack_slug):
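        """Yield (query_slug, pack_query_defaults, query_defaults) for every query
        in the pack, falling back to the pack-level platform and minimum osquery
        version when a query does not override them."""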
pack_platforms = self.data.get("platform", [])
pack_minimum_osquery_version = self.data.get("version", None)
for query_slug, query_data in self.data["queries"].items():
pack_query_defaults = {
"slug": query_slug,
"interval": query_data["interval"],
"log_removed_actions": not query_data.get("snapshot", False) and query_data.get("removed", True),
"snapshot_mode": query_data.get("snapshot", False),
"shard": query_data.get("shard"),
"can_be_denylisted": query_data.get("can_be_denylisted", True),
}
query_defaults = {
"name": f"{pack_slug}{Pack.DELIMITER}{query_slug}",
"sql": query_data["query"],
"platforms": query_data.get("platform", pack_platforms),
"minimum_osquery_version": query_data.get("version", pack_minimum_osquery_version),
"description": query_data.get("description", ""),
"value": query_data.get("value", "")
}
yield query_slug, pack_query_defaults, query_defaults
|
apache-2.0
| -9,080,164,344,115,751,000
| 44.587719
| 119
| 0.646527
| false
| 4.181014
| false
| false
| false
|
GreatSCT/GreatSCT
|
lib/common/orchestra.py
|
1
|
8604
|
'''
This is the conductor which controls everything
'''
import glob
import imp
import os
import readline
import sys
from lib.common import completer
from lib.common import helpers
from lib.common import messages
class Conductor:
def __init__(self, cli_stuff):
# Create dictionaries of supported modules
# empty until stuff loaded into them
self.imported_tools = {}
self.load_tools(cli_stuff)
self.mainmenu_commands = {
"list": "List available tools",
"use": "Use a specific tool",
"info": "Information on a specific tool",
"update": "Update GreatSCT",
"exit": "Exit GreatSCT"}
self.number_of_tools = len(self.imported_tools)
self.command_line_options = cli_stuff
def command_line_use(self):
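        """Run the tool named on the command line (matched case-insensitively
        against each tool's cli_name) and exit if no tool matches."""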
tool_found = False
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered number matches the payload, use that payload
if self.command_line_options.tool.lower() == tool_object.cli_name.lower():
tool_object.cli_menu()
tool_found = True
if not tool_found:
print(helpers.color('Error: You did not provide a valid tool name!', warning=True))
print(helpers.color('Quitting GreatSCT...', warning=True))
sys.exit()
def list_tools(self):
# show title bar
messages.title_screen()
# Loop over all tools loaded into GreatSCT, print name and description
# Function for listing all payloads
tool_counter = 1
print(helpers.color(' [*] Available Tools:\n'))
for key, tool in sorted(self.imported_tools.items()):
print('\t' + str(tool_counter) + ")\t" + tool.cli_name)
tool_counter += 1
print()
return
def load_tools(self, command_line_object):
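        """Import every Tools/*/Tool.py module and register an instance of its
        Tools class in self.imported_tools."""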
# Load all tools within the Tools folder
# (Evasion, Ordnance, Pillage, etc.)
for name in glob.glob('Tools/*/Tool.py'):
if name.endswith(".py") and ("__init__" not in name):
loaded_tool = imp.load_source(
name.replace("/", ".").rstrip('.py'), name)
self.imported_tools[name] = loaded_tool.Tools(
command_line_object)
return
def main_menu(self):
        # default blank command for the main menu loop
main_menu_command = ''
show_header = True
# Try except to catch keyboard interrupts
try:
# Loop for the main menu, will always loop as long as command is ''
while main_menu_command == '':
comp = completer.GreatSCTMainMenuCompleter(self.mainmenu_commands, self.imported_tools)
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(comp.complete)
if show_header:
messages.title_screen()
print("Main Menu")
print("\n\t" + helpers.color(len(self.imported_tools)) + " tools loaded\n")
print("Available Commands:\n")
for command in sorted(self.mainmenu_commands.keys()):
print("\t" + helpers.color(command) + '\t\t\t' + self.mainmenu_commands[command])
print()
main_menu_command = input('Main menu choice: ').strip()
if main_menu_command.startswith('use'):
# Check to make sure a tool is provided with use command
if len(main_menu_command.split()) == 1:
# List tools, don't show header, loop back in main menu
self.list_tools()
show_header = False
main_menu_command = ''
elif len(main_menu_command.split()) == 2:
# Grab the command, either the number or word
tool_choice = main_menu_command.split()[1]
# if we're choosing the payload by numbers
if tool_choice.isdigit() and\
0 < int(tool_choice) <= len(self.imported_tools):
tool_number = 1
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered number matches the payload, use that payload
if int(tool_choice) == tool_number:
tool_object.tool_main_menu()
tool_number += 1
show_header = True
else:
tool_number += 1
show_header = True
# Else if selecting payload by name
else:
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered number matches the payload, use that payload
if tool_choice.lower() == tool_object.cli_name.lower():
tool_object.tool_main_menu()
show_header = True
# Once done with tool, clear main menu command
main_menu_command = ''
show_header = True
# Catch anything else, like an error
else:
main_menu_command = ''
elif main_menu_command.startswith('list'):
# List tools, don't show header, loop back in main menu
self.list_tools()
show_header = False
main_menu_command = ''
elif main_menu_command.startswith('info'):
if len(main_menu_command.split()) == 1:
show_header = True
main_menu_command = ''
elif len(main_menu_command.split()) == 2:
# Grab the command, either the number or word
info_choice = main_menu_command.split()[1]
# if we're choosing the payload by numbers
if info_choice.isdigit() and\
0 < int(info_choice) <= len(self.imported_tools):
tool_number = 1
for key, tool_object in sorted(self.imported_tools.items()):
# if the entered number matches the tool, use that tool
if int(info_choice) == tool_number:
print()
print(helpers.color(tool_object.cli_name) + " => " + tool_object.description)
print()
show_header = False
tool_number += 1
# if the entered name matches the tool, use that tool
else:
for key, tool_object in sorted(self.imported_tools.items()):
if main_menu_command.split()[1].lower() == tool_object.cli_name.lower():
print()
print(helpers.color(tool_object.cli_name) + " => " + tool_object.description)
print()
show_header = False
main_menu_command = ''
else:
main_menu_command = ''
show_header = True
elif main_menu_command.startswith('update'):
self.update_greatsct()
main_menu_command = ''
elif main_menu_command.startswith('exit'):
print('\n' + helpers.color('You rage quit GreatSCT!', warning=True) + '\n')
sys.exit()
else:
show_header = True
main_menu_command = ''
except KeyboardInterrupt:
print("\n\n" + helpers.color("You rage quit GreatSCT!", warning=True))
sys.exit()
def update_greatsct(self):
os.system('git pull')
input('GreatSCT has checked for updates, press enter to continue')
return
|
gpl-3.0
| 3,642,661,304,942,000,600
| 40.565217
| 113
| 0.477569
| false
| 4.941987
| false
| false
| false
|
zultron/virt-manager
|
virtManager/domain.py
|
1
|
61912
|
#
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
# pylint: disable=E0611
from gi.repository import GObject
# pylint: enable=E0611
import logging
import os
import time
import threading
import libvirt
from virtinst import DomainSnapshot
from virtinst import Guest
from virtinst import util
from virtinst import VirtualController
from virtManager import uihelpers
from virtManager.libvirtobject import vmmLibvirtObject
def compare_device(origdev, newdev, idx):
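    """Compare two virtual devices using the identity properties listed in
    devprops for their device type; the pseudo-property 'vmmindex' is matched
    against the supplied list index rather than a device attribute."""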
devprops = {
"disk" : ["target", "bus"],
"interface" : ["macaddr", "vmmindex"],
"input" : ["bus", "type", "vmmindex"],
"sound" : ["model", "vmmindex"],
"video" : ["model", "vmmindex"],
"watchdog" : ["vmmindex"],
"hostdev" : ["type", "managed", "vmmindex",
"product", "vendor",
"function", "domain", "slot"],
"serial" : ["type", "target_port"],
"parallel" : ["type", "target_port"],
"console" : ["type", "target_type", "target_port"],
"graphics" : ["type", "vmmindex"],
"controller" : ["type", "index"],
"channel" : ["type", "target_name"],
"filesystem" : ["target" , "vmmindex"],
"smartcard" : ["mode" , "vmmindex"],
"redirdev" : ["bus" , "type", "vmmindex"],
"tpm" : ["type" , "vmmindex"],
"rng" : ["type" , "vmmindex"],
}
if id(origdev) == id(newdev):
return True
if type(origdev) is not type(newdev):
return False
for devprop in devprops[origdev.virtual_device_type]:
origval = getattr(origdev, devprop)
if devprop == "vmmindex":
newval = idx
else:
newval = getattr(newdev, devprop)
if origval != newval:
return False
return True
def find_device(guest, origdev):
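    """Return the device in guest that matches origdev according to
    compare_device, or None if no match is found."""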
devlist = guest.get_devices(origdev.virtual_device_type)
for idx in range(len(devlist)):
dev = devlist[idx]
if compare_device(origdev, dev, idx):
return dev
return None
def start_job_progress_thread(vm, meter, progtext):
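    """Poll vm.job_info() every half second on a daemon thread and feed the
    progress meter, stopping once the calling thread exits or job info can no
    longer be read."""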
current_thread = threading.currentThread()
def jobinfo_cb():
while True:
time.sleep(.5)
if not current_thread.isAlive():
return False
try:
jobinfo = vm.job_info()
data_total = float(jobinfo[3])
# data_processed = float(jobinfo[4])
data_remaining = float(jobinfo[5])
# data_total is 0 if the job hasn't started yet
if not data_total:
continue
if not meter.started:
meter.start(size=data_total,
text=progtext)
progress = data_total - data_remaining
meter.update(progress)
except:
logging.exception("Error calling jobinfo")
return False
return True
if vm.getjobinfo_supported:
t = threading.Thread(target=jobinfo_cb,
name="job progress reporting",
args=())
t.daemon = True
t.start()
class vmmInspectionData(object):
def __init__(self):
self.type = None
self.distro = None
self.major_version = None
self.minor_version = None
self.hostname = None
self.product_name = None
self.product_variant = None
self.icon = None
self.applications = None
self.error = False
class vmmDomainSnapshot(vmmLibvirtObject):
"""
Class wrapping a virDomainSnapshot object
"""
def __init__(self, conn, backend):
vmmLibvirtObject.__init__(self, conn, backend, backend.getName(),
DomainSnapshot)
self.refresh_xml()
def get_name(self):
return self.get_xmlobj().name
def _XMLDesc(self, flags):
return self._backend.getXMLDesc(flags=flags)
def delete(self, force=True):
ignore = force
self._backend.delete()
def run_status(self):
status = DomainSnapshot.state_str_to_int(self.get_xmlobj().state)
return vmmDomain.pretty_run_status(status)
def run_status_icon_name(self):
status = DomainSnapshot.state_str_to_int(self.get_xmlobj().state)
if status not in uihelpers.vm_status_icons:
logging.debug("Unknown status %d, using NOSTATE", status)
status = libvirt.VIR_DOMAIN_NOSTATE
return uihelpers.vm_status_icons[status]
def is_external(self):
if self.get_xmlobj().memory_type == "external":
return True
for disk in self.get_xmlobj().disks:
if disk.snapshot == "external":
return True
return False
class vmmDomain(vmmLibvirtObject):
"""
Class wrapping virDomain libvirt objects. Is also extended to be
backed by a virtinst.Guest object for new VM 'customize before install'
"""
__gsignals__ = {
"status-changed": (GObject.SignalFlags.RUN_FIRST, None, [int, int]),
"resources-sampled": (GObject.SignalFlags.RUN_FIRST, None, []),
"inspection-changed": (GObject.SignalFlags.RUN_FIRST, None, []),
"pre-startup": (GObject.SignalFlags.RUN_FIRST, None, [object]),
}
@staticmethod
def pretty_run_status(status, has_saved=False):
if status == libvirt.VIR_DOMAIN_RUNNING:
return _("Running")
elif status == libvirt.VIR_DOMAIN_PAUSED:
return _("Paused")
elif status == libvirt.VIR_DOMAIN_SHUTDOWN:
return _("Shutting Down")
elif status == libvirt.VIR_DOMAIN_SHUTOFF:
if has_saved:
return _("Saved")
else:
return _("Shutoff")
elif status == libvirt.VIR_DOMAIN_CRASHED:
return _("Crashed")
elif (hasattr(libvirt, "VIR_DOMAIN_PMSUSPENDED") and
status == libvirt.VIR_DOMAIN_PMSUSPENDED):
return _("Suspended")
logging.debug("Unknown status %d, returning 'Unknown'", status)
return _("Unknown")
def __init__(self, conn, backend, key):
vmmLibvirtObject.__init__(self, conn, backend, key, Guest)
self.uuid = key
self.cloning = False
self.record = []
self.maxRecord = {
"diskRdRate" : 10.0,
"diskWrRate" : 10.0,
"netTxRate" : 10.0,
"netRxRate" : 10.0,
}
self._install_abort = False
self.reboot_listener = None
self._startup_vcpus = None
self._is_management_domain = None
self._id = None
self._name = None
self._snapshot_list = None
self.lastStatus = libvirt.VIR_DOMAIN_SHUTOFF
self.managedsave_supported = False
self.remote_console_supported = False
self.title_supported = False
self._enable_net_poll = False
self._stats_net_supported = True
self._stats_net_skip = []
self._enable_disk_poll = False
self._stats_disk_supported = True
self._stats_disk_skip = []
self.inspection = vmmInspectionData()
if isinstance(self._backend, Guest):
return
self._libvirt_init()
def _cleanup(self):
for snap in self._snapshot_list or []:
snap.cleanup()
self._snapshot_list = None
def _libvirt_init(self):
"""
Initialization to do if backed by a libvirt virDomain
"""
self.managedsave_supported = self.conn.check_support(
self.conn.SUPPORT_DOMAIN_MANAGED_SAVE, self._backend)
self.remote_console_supported = self.conn.check_support(
self.conn.SUPPORT_DOMAIN_CONSOLE_STREAM, self._backend)
self.title_supported = self.conn.check_support(
self.conn.SUPPORT_DOMAIN_GET_METADATA, self._backend)
# Determine available XML flags (older libvirt versions will error
# out if passed SECURE_XML, INACTIVE_XML, etc)
(self._inactive_xml_flags,
self._active_xml_flags) = self.conn.get_dom_flags(self._backend)
self.toggle_sample_network_traffic()
self.toggle_sample_disk_io()
self.force_update_status()
# Hook up listeners that need to be cleaned up
self.add_gconf_handle(
self.config.on_stats_enable_net_poll_changed(
self.toggle_sample_network_traffic))
self.add_gconf_handle(
self.config.on_stats_enable_disk_poll_changed(
self.toggle_sample_disk_io))
self.connect("status-changed", self._update_start_vcpus)
self.connect("pre-startup", self._prestartup_nodedev_check)
def _prestartup_nodedev_check(self, src, ret):
ignore = src
error = _("There is more than one '%s' device attached to "
"your host, and we can't determine which one to "
"use for your guest.\n"
"To fix this, remove and reattach the USB device "
"to your guest using the 'Add Hardware' wizard.")
for hostdev in self.get_hostdev_devices():
devtype = hostdev.type
if devtype != "usb":
continue
vendor = hostdev.vendor
product = hostdev.product
bus = hostdev.bus
device = hostdev.device
if vendor and product:
count = self.conn.get_nodedevs_number("usb_device",
vendor,
product)
if count > 1 and not (bus and device):
prettyname = "%s %s" % (vendor, product)
ret.append(error % prettyname)
###########################
# Misc API getter methods #
###########################
def get_name(self):
if self._name is None:
self._name = self._backend.name()
return self._name
def get_name_or_title(self):
title = self.get_title()
if title:
return title
return self.get_name()
def get_title(self):
return self.get_xmlobj(inactive=True).title
def get_id(self):
if self._id is None:
self._id = self._backend.ID()
return self._id
def status(self):
return self.lastStatus
def get_cloning(self):
return self.cloning
def set_cloning(self, val):
self.cloning = bool(val)
# If manual shutdown or destroy specified, make sure we don't continue
# install process
def get_install_abort(self):
return bool(self._install_abort)
def rhel6_defaults(self):
return self.conn.rhel6_defaults(self.get_emulator())
def is_read_only(self):
if self.is_management_domain():
return True
return False
def is_management_domain(self):
if self._is_management_domain is None:
self._is_management_domain = (self.get_id() == 0)
return self._is_management_domain
def has_spicevmc_type_redirdev(self):
devs = self.get_redirdev_devices()
for dev in devs:
if dev.type == "spicevmc":
return True
return False
def get_id_pretty(self):
i = self.get_id()
if i < 0:
return "-"
return str(i)
##################
# Support checks #
##################
def _get_getvcpus_supported(self):
return self.conn.check_support(
self.conn.SUPPORT_DOMAIN_GETVCPUS, self._backend)
getvcpus_supported = property(_get_getvcpus_supported)
def _get_getjobinfo_supported(self):
return self.conn.check_support(
self.conn.SUPPORT_DOMAIN_JOB_INFO, self._backend)
getjobinfo_supported = property(_get_getjobinfo_supported)
def snapshots_supported(self):
if not self.conn.check_support(
self.conn.SUPPORT_DOMAIN_LIST_SNAPSHOTS, self._backend):
return _("Libvirt connection does not support snapshots.")
if self.list_snapshots():
return
# Check if our disks are all qcow2
seen_qcow2 = False
for disk in self.get_disk_devices(refresh_if_nec=False):
if disk.read_only:
continue
if not disk.path:
continue
if disk.driver_type == "qcow2":
seen_qcow2 = True
continue
return _("Snapshots are only supported if all writeable disks "
"images allocated to the guest are qcow2 format.")
if not seen_qcow2:
return _("Snapshots require at least one writeable qcow2 disk "
"image allocated to the guest.")
#############################
# Internal XML handling API #
#############################
def _invalidate_xml(self):
vmmLibvirtObject._invalidate_xml(self)
self._name = None
self._id = None
def _redefine_device(self, cb, origdev):
defguest = self._get_xmlobj_to_define()
dev = find_device(defguest, origdev)
if dev:
return cb(dev)
        # If we are removing multiple devices from an active VM, a double
# attempt may result in a lookup failure. If device is present
# in the active XML, assume all is good.
if find_device(self.get_xmlobj(), origdev):
logging.debug("Device in active config but not inactive config.")
return
raise RuntimeError(_("Could not find specified device in the "
"inactive VM configuration: %s") % repr(origdev))
##############################
# Persistent XML change APIs #
##############################
def define_name(self, newname):
return self._define_name_helper("domain",
self.conn.rename_vm,
newname)
# Device Add/Remove
def add_device(self, devobj):
"""
Redefine guest with appended device XML 'devxml'
"""
def change(guest):
guest.add_device(devobj)
ret = self._redefine(change)
self.redefine_cached()
return ret
def remove_device(self, devobj):
"""
Remove passed device from the inactive guest XML
"""
# HACK: If serial and console are both present, they both need
# to be removed at the same time
con = None
if hasattr(devobj, "virtmanager_console_dup"):
con = getattr(devobj, "virtmanager_console_dup")
def change(guest):
def rmdev(editdev):
if con:
rmcon = find_device(guest, con)
if rmcon:
guest.remove_device(rmcon)
guest.remove_device(editdev)
return self._redefine_device(rmdev, devobj)
ret = self._redefine(change)
self.redefine_cached()
return ret
# CPU define methods
def define_vcpus(self, vcpus, maxvcpus):
def change(guest):
guest.curvcpus = int(vcpus)
guest.vcpus = int(maxvcpus)
return self._redefine(change)
def define_cpuset(self, cpuset):
def change(guest):
guest.cpuset = cpuset
return self._redefine(change)
def define_cpu_topology(self, sockets, cores, threads):
def change(guest):
cpu = guest.cpu
cpu.sockets = sockets
cpu.cores = cores
cpu.threads = threads
return self._redefine(change)
def define_cpu(self, model, vendor, from_host, featurelist):
def change(guest):
if from_host:
guest.cpu.copy_host_cpu()
elif guest.cpu.model != model:
# Since we don't expose this in the UI, have host value trump
# caps value
guest.cpu.vendor = vendor
guest.cpu.model = model or None
if guest.cpu.model is None:
for f in guest.cpu.features:
guest.cpu.remove_feature(f)
return
origfeatures = guest.cpu.features
def set_feature(fname, fpol):
for f in origfeatures:
if f.name != fname:
continue
if f.policy != fpol:
if fpol == "default":
guest.cpu.remove_feature(f)
else:
f.policy = fpol
return
if fpol != "default":
guest.cpu.add_feature(fname, fpol)
# Sync feature lists
for fname, fpol in featurelist:
set_feature(fname, fpol)
return self._redefine(change)
# Mem define methods
def define_both_mem(self, memory, maxmem):
def change(guest):
guest.memory = int(memory)
guest.maxmemory = int(maxmem)
return self._redefine(change)
# Security define methods
def define_seclabel(self, model, t, label, relabel):
def change(guest):
seclabel = guest.seclabel
seclabel.model = model or None
if not model:
return
if relabel is not None:
if relabel:
seclabel.relabel = "yes"
else:
seclabel.relabel = "no"
seclabel.type = t
if label:
seclabel.label = label
return self._redefine(change)
# Machine config define methods
def define_acpi(self, newvalue):
def change(guest):
guest.features.acpi = newvalue
return self._redefine(change)
def define_apic(self, newvalue):
def change(guest):
guest.features.apic = newvalue
return self._redefine(change)
def define_clock(self, newvalue):
def change(guest):
guest.clock.offset = newvalue
return self._redefine(change)
def define_machtype(self, newvalue):
def change(guest):
guest.os.machine = newvalue
return self._redefine(change)
def define_description(self, newvalue):
def change(guest):
guest.description = newvalue or None
return self._redefine(change)
def define_title(self, newvalue):
def change(guest):
guest.title = newvalue or None
return self._redefine(change)
# Boot define methods
def set_boot_device(self, boot_list):
def change(guest):
guest.os.bootorder = boot_list
return self._redefine(change)
def set_boot_menu(self, newval):
def change(guest):
guest.os.enable_bootmenu = bool(newval)
return self._redefine(change)
def set_boot_kernel(self, kernel, initrd, dtb, args):
def change(guest):
guest.os.kernel = kernel or None
guest.os.initrd = initrd or None
guest.os.dtb = dtb or None
guest.os.kernel_args = args or None
return self._redefine(change)
def set_boot_init(self, init):
def change(guest):
guest.os.init = init
return self._redefine(change)
# Disk define methods
def define_storage_media(self, devobj, newpath):
def change(editdev):
editdev.path = newpath
editdev.sync_path_props()
return self._redefine_device(change, devobj)
def define_disk_readonly(self, devobj, do_readonly):
def change(editdev):
editdev.read_only = do_readonly
return self._redefine_device(change, devobj)
def define_disk_shareable(self, devobj, do_shareable):
def change(editdev):
editdev.shareable = do_shareable
return self._redefine_device(change, devobj)
def define_disk_removable(self, devobj, do_removable):
def change(editdev):
editdev.removable = do_removable
return self._redefine_device(change, devobj)
def define_disk_cache(self, devobj, new_cache):
def change(editdev):
editdev.driver_cache = new_cache or None
return self._redefine_device(change, devobj)
def define_disk_io(self, devobj, val):
def change(editdev):
editdev.driver_io = val or None
return self._redefine_device(change, devobj)
def define_disk_driver_type(self, devobj, new_driver_type):
def change(editdev):
editdev.driver_type = new_driver_type or None
return self._redefine_device(change, devobj)
def define_disk_bus(self, devobj, newval, addr):
def change(editdev):
oldprefix = editdev.get_target_prefix()[0]
oldbus = editdev.bus
editdev.bus = newval
if oldbus == newval:
return
editdev.address.clear()
editdev.address.set_addrstr(addr)
if oldprefix == editdev.get_target_prefix()[0]:
return
used = []
disks = (self.get_disk_devices() +
self.get_disk_devices(inactive=True))
for d in disks:
used.append(d.target)
if editdev.target:
used.remove(editdev.target)
editdev.target = None
editdev.generate_target(used)
return self._redefine_device(change, devobj)
def define_disk_serial(self, devobj, val):
def change(editdev):
if val != editdev.serial:
editdev.serial = val or None
return self._redefine_device(change, devobj)
def define_disk_iotune_rbs(self, devobj, val):
def change(editdev):
editdev.iotune_rbs = val
return self._redefine_device(change, devobj)
def define_disk_iotune_ris(self, devobj, val):
def change(editdev):
editdev.iotune_ris = val
return self._redefine_device(change, devobj)
def define_disk_iotune_tbs(self, devobj, val):
def change(editdev):
editdev.iotune_tbs = val
return self._redefine_device(change, devobj)
def define_disk_iotune_tis(self, devobj, val):
def change(editdev):
editdev.iotune_tis = val
return self._redefine_device(change, devobj)
def define_disk_iotune_wbs(self, devobj, val):
def change(editdev):
editdev.iotune_wbs = val
return self._redefine_device(change, devobj)
def define_disk_iotune_wis(self, devobj, val):
def change(editdev):
editdev.iotune_wis = val
return self._redefine_device(change, devobj)
# Network define methods
def define_network_source(self, devobj, newtype, newsource, newmode):
def change(editdev):
if not newtype:
return
editdev.source = None
editdev.type = newtype
editdev.source = newsource
editdev.source_mode = newmode or None
return self._redefine_device(change, devobj)
def define_network_model(self, devobj, newmodel, addr):
def change(editdev):
if editdev.model != newmodel:
editdev.address.clear()
editdev.address.set_addrstr(addr)
editdev.model = newmodel
return self._redefine_device(change, devobj)
def define_virtualport(self, devobj, newtype, newmanagerid,
newtypeid, newtypeidversion, newinstanceid):
def change(editdev):
editdev.virtualport.type = newtype or None
editdev.virtualport.managerid = newmanagerid or None
editdev.virtualport.typeid = newtypeid or None
editdev.virtualport.typeidversion = newtypeidversion or None
editdev.virtualport.instanceid = newinstanceid or None
return self._redefine_device(change, devobj)
# Graphics define methods
def define_graphics_password(self, devobj, newval):
def change(editdev):
editdev.passwd = newval or None
return self._redefine_device(change, devobj)
def define_graphics_keymap(self, devobj, newval):
def change(editdev):
editdev.keymap = newval
return self._redefine_device(change, devobj)
def define_graphics_type(self, devobj, newval):
def change(editdev):
editdev.type = newval
return self._redefine_device(change, devobj)
# Sound define methods
def define_sound_model(self, devobj, newmodel):
def change(editdev):
if editdev.model != newmodel:
editdev.address.clear()
editdev.model = newmodel
return self._redefine_device(change, devobj)
# Video define methods
def define_video_model(self, devobj, newmodel):
def change(editdev):
if newmodel == editdev.model:
return
editdev.model = newmodel
editdev.address.clear()
# Clear out heads/ram values so they reset to default. If
# we ever allow editing these values in the UI we should
# drop this
editdev.vram = None
editdev.heads = None
editdev.ram = None
return self._redefine_device(change, devobj)
# Watchdog define methods
def define_watchdog_model(self, devobj, newval):
def change(editdev):
if editdev.model != newval:
editdev.address.clear()
editdev.model = newval
return self._redefine_device(change, devobj)
def define_watchdog_action(self, devobj, newval):
def change(editdev):
editdev.action = newval
return self._redefine_device(change, devobj)
# Smartcard define methods
def define_smartcard_mode(self, devobj, newmodel):
def change(editdev):
editdev.mode = newmodel
editdev.type = editdev.TYPE_DEFAULT
return self._redefine_device(change, devobj)
# Controller define methods
def define_controller_model(self, devobj, newmodel):
def change(editdev):
ignore = editdev
guest = self._get_xmlobj_to_define()
ctrls = guest.get_devices("controller")
ctrls = [x for x in ctrls if (x.type ==
VirtualController.TYPE_USB)]
for dev in ctrls:
guest.remove_device(dev)
if newmodel == "ich9-ehci1":
for dev in VirtualController.get_usb2_controllers(
guest.conn):
guest.add_device(dev)
else:
dev = VirtualController(guest.conn)
dev.type = "usb"
if newmodel != "default":
dev.model = newmodel
guest.add_device(dev)
return self._redefine_device(change, devobj)
####################
# Hotplug routines #
####################
def attach_device(self, devobj):
"""
Hotplug device to running guest
"""
if not self.is_active():
return
devxml = devobj.get_xml_config()
self._backend.attachDevice(devxml)
def detach_device(self, devobj):
"""
Hotunplug device from running guest
"""
if not self.is_active():
return
xml = devobj.get_xml_config()
self._backend.detachDevice(xml)
def update_device(self, devobj, flags=1):
if not self.is_active():
return
# Default flag is VIR_DOMAIN_DEVICE_MODIFY_LIVE
xml = devobj.get_xml_config()
self._backend.updateDeviceFlags(xml, flags)
def hotplug_vcpus(self, vcpus):
vcpus = int(vcpus)
if vcpus != self.vcpu_count():
self._backend.setVcpus(vcpus)
def hotplug_memory(self, memory):
if memory != self.get_memory():
self._backend.setMemory(memory)
def hotplug_maxmem(self, maxmem):
if maxmem != self.maximum_memory():
self._backend.setMaxMemory(maxmem)
def hotplug_both_mem(self, memory, maxmem):
logging.info("Hotplugging curmem=%s maxmem=%s for VM '%s'",
memory, maxmem, self.get_name())
if self.is_active():
actual_cur = self.get_memory()
if memory:
if maxmem < actual_cur:
# Set current first to avoid error
self.hotplug_memory(memory)
self.hotplug_maxmem(maxmem)
else:
self.hotplug_maxmem(maxmem)
self.hotplug_memory(memory)
else:
self.hotplug_maxmem(maxmem)
def hotplug_storage_media(self, devobj, newpath):
devobj.path = newpath
self.attach_device(devobj)
def hotplug_graphics_password(self, devobj, newval):
devobj.passwd = newval
self.update_device(devobj)
def hotplug_description(self, desc):
# We already fake hotplug-like behavior by reading the
# description from the inactive XML of a running VM.
#
# libvirt since 0.9.10 provides a SetMetadata API that offers
# actual <description> 'hotplug', and using that means checking
# for support, version, etc.
if not self.conn.check_support(
self.conn.SUPPORT_DOMAIN_SET_METADATA, self._backend):
return
flags = (libvirt.VIR_DOMAIN_AFFECT_LIVE |
libvirt.VIR_DOMAIN_AFFECT_CONFIG)
self._backend.setMetadata(
libvirt.VIR_DOMAIN_METADATA_DESCRIPTION,
desc, None, None, flags)
def hotplug_title(self, title):
if not self.conn.check_support(
self.conn.SUPPORT_DOMAIN_SET_METADATA, self._backend):
return
flags = (libvirt.VIR_DOMAIN_AFFECT_LIVE |
libvirt.VIR_DOMAIN_AFFECT_CONFIG)
self._backend.setMetadata(
libvirt.VIR_DOMAIN_METADATA_TITLE,
title, None, None, flags)
########################
# Libvirt API wrappers #
########################
def _define(self, newxml):
self.conn.define_domain(newxml)
def _XMLDesc(self, flags):
return self._backend.XMLDesc(flags)
def pin_vcpu(self, vcpu_num, pinlist):
self._backend.pinVcpu(vcpu_num, pinlist)
def vcpu_info(self):
if self.is_active() and self.getvcpus_supported:
return self._backend.vcpus()
return [[], []]
def get_autostart(self):
return self._backend.autostart()
def set_autostart(self, val):
if self.get_autostart() == val:
return
self._backend.setAutostart(val)
def job_info(self):
return self._backend.jobInfo()
def abort_job(self):
self._backend.abortJob()
def open_console(self, devname, stream, flags=0):
return self._backend.openConsole(devname, stream, flags)
def refresh_snapshots(self):
self._snapshot_list = None
def list_snapshots(self):
if self._snapshot_list is None:
newlist = []
for rawsnap in self._backend.listAllSnapshots():
newlist.append(vmmDomainSnapshot(self.conn, rawsnap))
self._snapshot_list = newlist
return self._snapshot_list[:]
def revert_to_snapshot(self, snap):
self._backend.revertToSnapshot(snap.get_backend())
self.idle_add(self.force_update_status)
def create_snapshot(self, xml, redefine=False):
flags = 0
if redefine:
flags = (flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE)
if not redefine:
logging.debug("Creating snapshot flags=%s xml=\n%s", flags, xml)
self._backend.snapshotCreateXML(xml, flags)
########################
# XML Parsing routines #
########################
def is_container(self):
return self.get_xmlobj().os.is_container()
def is_xenpv(self):
return self.get_xmlobj().os.is_xenpv()
def is_hvm(self):
return self.get_xmlobj().os.is_hvm()
def get_uuid(self):
return self.uuid
def get_abi_type(self):
return self.get_xmlobj().os.os_type
def get_hv_type(self):
return self.get_xmlobj().type
def get_pretty_hv_type(self):
return uihelpers.pretty_hv(self.get_abi_type(), self.get_hv_type())
def get_arch(self):
return self.get_xmlobj().os.arch
def get_init(self):
return self.get_xmlobj().os.init
def get_emulator(self):
return self.get_xmlobj().emulator
def get_acpi(self):
return self.get_xmlobj().features.acpi
def get_apic(self):
return self.get_xmlobj().features.apic
def get_clock(self):
return self.get_xmlobj().clock.offset
def get_machtype(self):
return self.get_xmlobj().os.machine
def get_description(self):
# Always show the inactive <description>, let's us fake hotplug
# for a field that's strictly metadata
return self.get_xmlobj(inactive=True).description
def get_memory(self):
return int(self.get_xmlobj().memory)
def maximum_memory(self):
return int(self.get_xmlobj().maxmemory)
def vcpu_count(self):
guest = self.get_xmlobj()
return int(guest.curvcpus or
self._startup_vcpus or
guest.vcpus)
def vcpu_max_count(self):
guest = self.get_xmlobj()
has_xml_max = (guest.curvcpus != guest.vcpus)
if has_xml_max or not self.is_active():
return guest.vcpus
if self._startup_vcpus is None:
self._startup_vcpus = int(self.vcpu_count())
return int(self._startup_vcpus)
def vcpu_pinning(self):
return self.get_xmlobj().cpuset or ""
def get_cpu_config(self):
return self.get_xmlobj().cpu
def get_boot_device(self):
return self.get_xmlobj().os.bootorder
def get_boot_menu(self):
guest = self.get_xmlobj()
return bool(guest.os.enable_bootmenu)
def get_boot_kernel_info(self):
guest = self.get_xmlobj()
return (guest.os.kernel, guest.os.initrd,
guest.os.dtb, guest.os.kernel_args)
def get_seclabel(self):
seclabel = self.get_xmlobj().seclabel
model = seclabel.model
t = seclabel.type or "dynamic"
label = seclabel.label or ""
relabel = getattr(seclabel, "relabel", None)
if relabel is not None:
if relabel == "yes":
relabel = True
else:
relabel = False
return [model, t, label, relabel]
# XML Device listing
def get_serial_devs(self):
devs = self.get_char_devices()
devlist = []
devlist += [x for x in devs if x.virtual_device_type == "serial"]
devlist += [x for x in devs if x.virtual_device_type == "console"]
return devlist
def _build_device_list(self, device_type,
refresh_if_nec=True, inactive=False):
guest = self.get_xmlobj(refresh_if_nec=refresh_if_nec,
inactive=inactive)
devs = guest.get_devices(device_type)
count = 0
for dev in devs:
dev.vmmindex = count
count += 1
return devs
def get_network_devices(self, refresh_if_nec=True):
return self._build_device_list("interface", refresh_if_nec)
def get_video_devices(self):
return self._build_device_list("video")
def get_hostdev_devices(self):
return self._build_device_list("hostdev")
def get_watchdog_devices(self):
return self._build_device_list("watchdog")
def get_input_devices(self):
return self._build_device_list("input")
def get_graphics_devices(self):
return self._build_device_list("graphics")
def get_sound_devices(self):
return self._build_device_list("sound")
def get_controller_devices(self):
return self._build_device_list("controller")
def get_filesystem_devices(self):
return self._build_device_list("filesystem")
def get_smartcard_devices(self):
return self._build_device_list("smartcard")
def get_redirdev_devices(self):
return self._build_device_list("redirdev")
def get_tpm_devices(self):
return self._build_device_list("tpm")
def get_rng_devices(self):
return self._build_device_list("rng")
def get_disk_devices(self, refresh_if_nec=True, inactive=False):
devs = self._build_device_list("disk", refresh_if_nec, inactive)
# Iterate through all disks and calculate what number they are
# HACK: We are making a variable in VirtualDisk to store the index
idx_mapping = {}
for dev in devs:
devtype = dev.device
bus = dev.bus
key = devtype + (bus or "")
if key not in idx_mapping:
idx_mapping[key] = 1
dev.disk_bus_index = idx_mapping[key]
idx_mapping[key] += 1
return devs
def get_char_devices(self):
devs = []
serials = self._build_device_list("serial")
parallels = self._build_device_list("parallel")
consoles = self._build_device_list("console")
channels = self._build_device_list("channel")
for devicelist in [serials, parallels, consoles, channels]:
devs.extend(devicelist)
# Don't display <console> if it's just a duplicate of <serial>
if (len(consoles) > 0 and len(serials) > 0):
con = consoles[0]
ser = serials[0]
if (con.type == ser.type and
(con.target_type is None or con.target_type == "serial")):
ser.virtmanager_console_dup = con
devs.remove(con)
return devs
############################
# Domain lifecycle methods #
############################
# All these methods are usually run asynchronously from threads, so
# let's be extra careful and have anything which might touch UI
# or GObject.props invoked in an idle callback
def _unregister_reboot_listener(self):
if self.reboot_listener is None:
return
try:
self.idle_add(self.disconnect, self.reboot_listener)
self.reboot_listener = None
except:
pass
def manual_reboot(self):
"""
Attempt a manual reboot by invoking 'shutdown', then listen
for a state change and restart the VM
"""
def reboot_listener(vm, ignore1, ignore2, self):
if vm.is_crashed():
# Abandon reboot plans
self.reboot_listener = None
return True
if not vm.is_shutoff():
# Not shutoff, continue waiting
return
try:
logging.debug("Fake reboot detected shutdown. Restarting VM")
vm.startup()
except:
logging.exception("Fake reboot startup failed")
self.reboot_listener = None
return True
self._unregister_reboot_listener()
# Request a shutdown
self.shutdown()
def add_reboot():
self.reboot_listener = self.connect_opt_out("status-changed",
reboot_listener, self)
self.idle_add(add_reboot)
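# Illustrative note (added; 'vm' is a hypothetical reference to this object):
# the fake-reboot flow above is simply "shutdown, wait for the shutoff
# status-changed event, then start again":
#
#   vm.manual_reboot()    # requests a shutdown and registers the listener
#   # ... reboot_listener fires on each status change ...
#   # once vm.is_shutoff() returns True the listener calls vm.startup()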
def shutdown(self):
self._install_abort = True
self._unregister_reboot_listener()
self._backend.shutdown()
self.idle_add(self.force_update_status)
def reboot(self):
self._install_abort = True
self._backend.reboot(0)
self.idle_add(self.force_update_status)
def destroy(self):
self._install_abort = True
self._unregister_reboot_listener()
self._backend.destroy()
self.idle_add(self.force_update_status)
def reset(self):
self._install_abort = True
self._backend.reset(0)
self.idle_add(self.force_update_status)
def startup(self):
if self.get_cloning():
raise RuntimeError(_("Cannot start guest while cloning "
"operation in progress"))
pre_startup_ret = []
self.emit("pre-startup", pre_startup_ret)
for error in pre_startup_ret:
raise RuntimeError(error)
self._backend.create()
self.idle_add(self.force_update_status)
def suspend(self):
self._backend.suspend()
self.idle_add(self.force_update_status)
def delete(self, force=True):
flags = 0
if force:
flags |= getattr(libvirt,
"VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA", 0)
flags |= getattr(libvirt, "VIR_DOMAIN_UNDEFINE_MANAGED_SAVE", 0)
try:
self._backend.undefineFlags(flags)
except libvirt.libvirtError:
logging.exception("libvirt undefineFlags failed, "
"falling back to old style")
self._backend.undefine()
def resume(self):
if self.get_cloning():
raise RuntimeError(_("Cannot resume guest while cloning "
"operation in progress"))
self._backend.resume()
self.idle_add(self.force_update_status)
def hasSavedImage(self):
if not self.managedsave_supported:
return False
try:
return self._backend.hasManagedSaveImage(0)
except libvirt.libvirtError, e:
if not uihelpers.exception_is_libvirt_error(e, "VIR_ERR_NO_DOMAIN"):
raise
return False
def removeSavedImage(self):
if not self.hasSavedImage():
return
self._backend.managedSaveRemove(0)
def save(self, filename=None, meter=None):
self._install_abort = True
if meter:
start_job_progress_thread(self, meter, _("Saving domain to disk"))
if not self.managedsave_supported:
self._backend.save(filename)
else:
self._backend.managedSave(0)
self.idle_add(self.force_update_status)
def support_downtime(self):
return self.conn.check_support(
self.conn.SUPPORT_DOMAIN_MIGRATE_DOWNTIME, self._backend)
def migrate_set_max_downtime(self, max_downtime, flag=0):
self._backend.migrateSetMaxDowntime(max_downtime, flag)
def migrate(self, destconn, interface=None, rate=0,
live=False, secure=False, meter=None):
self._install_abort = True
newname = None
flags = 0
if self.status() == libvirt.VIR_DOMAIN_RUNNING and live:
flags |= libvirt.VIR_MIGRATE_LIVE
if secure:
flags |= libvirt.VIR_MIGRATE_PEER2PEER
flags |= libvirt.VIR_MIGRATE_TUNNELLED
destconn = destconn.get_backend().libvirtconn
logging.debug("Migrating: conn=%s flags=%s dname=%s uri=%s rate=%s",
destconn, flags, newname, interface, rate)
if meter:
start_job_progress_thread(self, meter, _("Migrating domain"))
self._backend.migrate(destconn, flags, newname, interface, rate)
def define_cb():
newxml = self.get_xml(inactive=True)
destconn.define_domain(newxml)
self.idle_add(define_cb)
# Don't schedule any conn update, migrate dialog handles it for us
###################
# Stats helpers ###
###################
def _sample_mem_stats(self, info):
curmem = info[2]
if not self.is_active():
curmem = 0
pcentCurrMem = curmem * 100.0 / self.maximum_memory()
pcentCurrMem = max(0.0, min(pcentCurrMem, 100.0))
return pcentCurrMem, curmem
def _sample_cpu_stats(self, info, now):
prevCpuTime = 0
prevTimestamp = 0
cpuTime = 0
cpuTimeAbs = 0
pcentHostCpu = 0
pcentGuestCpu = 0
if len(self.record) > 0:
prevTimestamp = self.record[0]["timestamp"]
prevCpuTime = self.record[0]["cpuTimeAbs"]
if not (info[0] in [libvirt.VIR_DOMAIN_SHUTOFF,
libvirt.VIR_DOMAIN_CRASHED]):
guestcpus = info[3]
cpuTime = info[4] - prevCpuTime
cpuTimeAbs = info[4]
hostcpus = self.conn.host_active_processor_count()
pcentbase = (((cpuTime) * 100.0) /
((now - prevTimestamp) * 1000.0 * 1000.0 * 1000.0))
pcentHostCpu = pcentbase / hostcpus
pcentGuestCpu = pcentbase / guestcpus
pcentHostCpu = max(0.0, min(100.0, pcentHostCpu))
pcentGuestCpu = max(0.0, min(100.0, pcentGuestCpu))
return cpuTime, cpuTimeAbs, pcentHostCpu, pcentGuestCpu
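# Worked example (added for clarity, not part of the original code): with a
# 1 second polling interval, cpuTime is in nanoseconds, so a guest that burned
# 0.5s of CPU between samples on a 4-CPU host while having 2 vCPUs gives:
#
#   pcentbase     = 0.5e9 * 100.0 / (1.0 * 1e9)   # = 50.0
#   pcentHostCpu  = 50.0 / 4                      # = 12.5 (% of the whole host)
#   pcentGuestCpu = 50.0 / 2                      # = 25.0 (% of the guest's vCPUs)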
def _get_cur_rate(self, what):
if len(self.record) > 1:
ret = (float(self.record[0][what] -
self.record[1][what]) /
float(self.record[0]["timestamp"] -
self.record[1]["timestamp"]))
else:
ret = 0.0
return max(ret, 0) # avoid negative values at poweroff
def _set_max_rate(self, record, what):
if record[what] > self.maxRecord[what]:
self.maxRecord[what] = record[what]
def _get_max_rate(self, name1, name2):
return float(max(self.maxRecord[name1], self.maxRecord[name2]))
def _get_record_helper(self, record_name):
if len(self.record) == 0:
return 0
return self.record[0][record_name]
def _vector_helper(self, record_name):
vector = []
stats = self.record
for i in range(self.config.get_stats_history_length() + 1):
if i < len(stats):
vector.append(stats[i][record_name] / 100.0)
else:
vector.append(0)
return vector
def _in_out_vector_helper(self, name1, name2, ceil):
vector = []
stats = self.record
if ceil is None:
ceil = self._get_max_rate(name1, name2)
maxlen = self.config.get_stats_history_length()
for n in [name1, name2]:
for i in range(maxlen + 1):
if i < len(stats):
vector.append(float(stats[i][n]) / ceil)
else:
vector.append(0.0)
return vector
def in_out_vector_limit(self, data, limit):
l = len(data) / 2
end = min(l, limit)
if l > limit:
data = data[0:end] + data[l:l + end]
return [(x + y) / 2 for x, y in zip(data[0:end], data[end:end * 2])]
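# Worked example (added, not in the original): the helper above receives a
# vector holding the "in" samples followed by the "out" samples and averages
# them pairwise, truncated to 'limit' points:
#
#   data  = [1.0, 3.0, 2.0, 4.0]    # [in0, in1, out0, out1]
#   limit = 2                       # end == 2, nothing is truncated
#   # result: [(1.0 + 2.0) / 2, (3.0 + 4.0) / 2] == [1.5, 3.5]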
def toggle_sample_network_traffic(self, ignore=None):
self._enable_net_poll = self.config.get_stats_enable_net_poll()
if self._enable_net_poll and len(self.record) > 1:
rxBytes, txBytes = self._sample_network_traffic()
self.record[0]["netRxKB"] = rxBytes / 1024
self.record[0]["netTxKB"] = txBytes / 1024
def toggle_sample_disk_io(self, ignore=None):
self._enable_disk_poll = self.config.get_stats_enable_disk_poll()
if self._enable_disk_poll and len(self.record) > 1:
rdBytes, wrBytes = self._sample_disk_io()
self.record[0]["diskRdKB"] = rdBytes / 1024
self.record[0]["diskWrKB"] = wrBytes / 1024
###################
# Stats accessors #
###################
def stats_memory(self):
return self._get_record_helper("curmem")
def cpu_time(self):
return self._get_record_helper("cpuTime")
def host_cpu_time_percentage(self):
return self._get_record_helper("cpuHostPercent")
def guest_cpu_time_percentage(self):
return self._get_record_helper("cpuGuestPercent")
def network_rx_rate(self):
return self._get_record_helper("netRxRate")
def network_tx_rate(self):
return self._get_record_helper("netTxRate")
def disk_read_rate(self):
return self._get_record_helper("diskRdRate")
def disk_write_rate(self):
return self._get_record_helper("diskWrRate")
def get_memory_pretty(self):
return util.pretty_mem(self.get_memory())
def maximum_memory_pretty(self):
return util.pretty_mem(self.maximum_memory())
def network_traffic_rate(self):
return self.network_tx_rate() + self.network_rx_rate()
def network_traffic_max_rate(self):
return self._get_max_rate("netRxRate", "netTxRate")
def disk_io_rate(self):
return self.disk_read_rate() + self.disk_write_rate()
def disk_io_max_rate(self):
return self._get_max_rate("diskRdRate", "diskWrRate")
def host_cpu_time_vector(self):
return self._vector_helper("cpuHostPercent")
def guest_cpu_time_vector(self):
return self._vector_helper("cpuGuestPercent")
def stats_memory_vector(self):
return self._vector_helper("currMemPercent")
def network_traffic_vector(self, ceil=None):
return self._in_out_vector_helper("netRxRate", "netTxRate", ceil)
def disk_io_vector(self, ceil=None):
return self._in_out_vector_helper("diskRdRate", "diskWrRate", ceil)
def host_cpu_time_vector_limit(self, limit):
cpudata = self.host_cpu_time_vector()
if len(cpudata) > limit:
cpudata = cpudata[0:limit]
return cpudata
def guest_cpu_time_vector_limit(self, limit):
cpudata = self.guest_cpu_time_vector()
if len(cpudata) > limit:
cpudata = cpudata[0:limit]
return cpudata
def network_traffic_vector_limit(self, limit, ceil=None):
return self.in_out_vector_limit(self.network_traffic_vector(ceil),
limit)
def disk_io_vector_limit(self, limit, ceil=None):
return self.in_out_vector_limit(self.disk_io_vector(ceil), limit)
###################
# Status helpers ##
###################
def _update_start_vcpus(self, ignore, oldstatus, status):
ignore = status
if oldstatus not in [libvirt.VIR_DOMAIN_SHUTDOWN,
libvirt.VIR_DOMAIN_SHUTOFF,
libvirt.VIR_DOMAIN_CRASHED]:
return
# Want to track the startup vcpu amount, which is the
# cap of how many VCPUs can be added
self._startup_vcpus = None
self.vcpu_max_count()
def _normalize_status(self, status):
if status == libvirt.VIR_DOMAIN_NOSTATE:
return libvirt.VIR_DOMAIN_RUNNING
elif status == libvirt.VIR_DOMAIN_BLOCKED:
return libvirt.VIR_DOMAIN_RUNNING
return status
def is_active(self):
return not self.is_shutoff()
def is_shutoff(self):
return self.status() == libvirt.VIR_DOMAIN_SHUTOFF
def is_crashed(self):
return self.status() == libvirt.VIR_DOMAIN_CRASHED
def is_stoppable(self):
return self.status() in [libvirt.VIR_DOMAIN_RUNNING,
libvirt.VIR_DOMAIN_PAUSED]
def is_destroyable(self):
return (self.is_stoppable() or
self.status() in [libvirt.VIR_DOMAIN_CRASHED])
def is_runable(self):
return self.status() in [libvirt.VIR_DOMAIN_SHUTOFF,
libvirt.VIR_DOMAIN_CRASHED]
def is_pauseable(self):
return self.status() in [libvirt.VIR_DOMAIN_RUNNING]
def is_unpauseable(self):
return self.status() in [libvirt.VIR_DOMAIN_PAUSED]
def is_paused(self):
return self.status() in [libvirt.VIR_DOMAIN_PAUSED]
def run_status(self):
return self.pretty_run_status(self.status(), self.hasSavedImage())
def run_status_icon_name(self):
status = self.status()
if status not in uihelpers.vm_status_icons:
logging.debug("Unknown status %d, using NOSTATE", status)
status = libvirt.VIR_DOMAIN_NOSTATE
return uihelpers.vm_status_icons[status]
def force_update_status(self):
"""
Fetch current domain state and clear status cache
"""
try:
info = self._backend.info()
self._update_status(info[0])
except libvirt.libvirtError, e:
if uihelpers.exception_is_libvirt_error(e, "VIR_ERR_NO_DOMAIN"):
return
raise
def _update_status(self, status):
"""
Internal helper to change cached status to 'status' and signal
clients if we actually changed state
"""
status = self._normalize_status(status)
if status == self.lastStatus:
return
oldstatus = self.lastStatus
self.lastStatus = status
# Send 'config-changed' before a status-update, so users
# are operating with fresh XML
self.refresh_xml()
self.idle_emit("status-changed", oldstatus, status)
def inspection_data_updated(self):
self.idle_emit("inspection-changed")
##################
# config helpers #
##################
def on_console_scaling_changed(self, *args, **kwargs):
return self.config.listen_pervm(self.uuid, "/scaling",
*args, **kwargs)
def set_console_scaling(self, value):
self.config.set_pervm(self.uuid, "/scaling", value)
def get_console_scaling(self):
ret = self.config.get_pervm(self.uuid, "/scaling")
if ret == -1:
return self.config.get_console_scaling()
return ret
def set_details_window_size(self, w, h):
self.config.set_pervm(self.uuid, "/vm-window-size", (w, h))
def get_details_window_size(self):
ret = self.config.get_pervm(self.uuid, "/vm-window-size")
return ret
def get_console_password(self):
return self.config.get_pervm(self.uuid, "/console-password")
def set_console_password(self, username, keyid):
return self.config.set_pervm(self.uuid, "/console-password",
(username, keyid))
def get_cache_dir(self):
ret = os.path.join(self.conn.get_cache_dir(), self.get_uuid())
if not os.path.exists(ret):
os.makedirs(ret, 0755)
return ret
###################
# Polling helpers #
###################
def _sample_network_traffic(self):
rx = 0
tx = 0
if (not self._stats_net_supported or
not self._enable_net_poll or
not self.is_active()):
return rx, tx
for netdev in self.get_network_devices(refresh_if_nec=False):
dev = netdev.target_dev
if not dev:
continue
if dev in self._stats_net_skip:
continue
try:
io = self._backend.interfaceStats(dev)
if io:
rx += io[0]
tx += io[4]
except libvirt.libvirtError, err:
if util.is_error_nosupport(err):
logging.debug("Net stats not supported: %s", err)
self._stats_net_supported = False
else:
logging.error("Error reading net stats for "
"'%s' dev '%s': %s",
self.get_name(), dev, err)
if self.is_active():
logging.debug("Adding %s to skip list", dev)
self._stats_net_skip.append(dev)
else:
logging.debug("Aren't running, don't add to skiplist")
return rx, tx
def _sample_disk_io(self):
rd = 0
wr = 0
if (not self._stats_disk_supported or
not self._enable_disk_poll or
not self.is_active()):
return rd, wr
for disk in self.get_disk_devices(refresh_if_nec=False):
dev = disk.target
if not dev:
continue
if dev in self._stats_disk_skip:
continue
try:
io = self._backend.blockStats(dev)
if io:
rd += io[1]
wr += io[3]
except libvirt.libvirtError, err:
if util.is_error_nosupport(err):
logging.debug("Disk stats not supported: %s", err)
self._stats_disk_supported = False
else:
logging.error("Error reading disk stats for "
"'%s' dev '%s': %s",
self.get_name(), dev, err)
if self.is_active():
logging.debug("Adding %s to skip list", dev)
self._stats_disk_skip.append(dev)
else:
logging.debug("Aren't running, don't add to skiplist")
return rd, wr
def tick(self, stats_update=True):
self._invalidate_xml()
info = self._backend.info()
if stats_update:
self._tick_stats(info)
self._update_status(info[0])
if stats_update:
self.idle_emit("resources-sampled")
def _tick_stats(self, info):
expected = self.config.get_stats_history_length()
current = len(self.record)
if current > expected:
del self.record[expected:current]
# Xen reports complete crap for Dom0 max memory
# (ie MAX_LONG) so let's clamp it to the actual
# physical RAM in the machine, which is the effective
# real world limit
if (self.conn.is_xen() and
self.is_management_domain()):
info[1] = self.conn.host_memory_size()
now = time.time()
(cpuTime, cpuTimeAbs,
pcentHostCpu, pcentGuestCpu) = self._sample_cpu_stats(info, now)
pcentCurrMem, curmem = self._sample_mem_stats(info)
rdBytes, wrBytes = self._sample_disk_io()
rxBytes, txBytes = self._sample_network_traffic()
newStats = {
"timestamp": now,
"cpuTime": cpuTime,
"cpuTimeAbs": cpuTimeAbs,
"cpuHostPercent": pcentHostCpu,
"cpuGuestPercent": pcentGuestCpu,
"curmem": curmem,
"currMemPercent": pcentCurrMem,
"diskRdKB": rdBytes / 1024,
"diskWrKB": wrBytes / 1024,
"netRxKB": rxBytes / 1024,
"netTxKB": txBytes / 1024,
}
for r in ["diskRd", "diskWr", "netRx", "netTx"]:
newStats[r + "Rate"] = self._get_cur_rate(r + "KB")
self._set_max_rate(newStats, r + "Rate")
self.record.insert(0, newStats)
########################
# Libvirt domain class #
########################
class vmmDomainVirtinst(vmmDomain):
"""
Domain object backed by a virtinst Guest object.
Used for launching a details window for customizing a VM before install.
"""
def __init__(self, conn, backend, key):
vmmDomain.__init__(self, conn, backend, key)
self._orig_xml = ""
def get_name(self):
return self._backend.name
def get_id(self):
return -1
def hasSavedImage(self):
return False
def _XMLDesc(self, flags):
raise RuntimeError("Shouldn't be called")
def get_xml(self, *args, **kwargs):
ignore = args
ignore = kwargs
return self._backend.get_install_xml(install=False)
def _refresh_orig_xml(self):
# We need to cache origxml in order to have something to diff against
if not self._orig_xml:
self._orig_xml = self._backend.get_xml_config()
def get_xmlobj(self, inactive=False, refresh_if_nec=True):
self._refresh_orig_xml()
return self._backend
def _reparse_xml(self, *args, **kwargs):
ignore = args
ignore = kwargs
def _define(self, newxml):
ignore = newxml
self._orig_xml = ""
self.emit("config-changed")
def _redefine_xml(self, newxml):
return self._redefine_helper(self._orig_xml, newxml)
def refresh_xml(self, forcesignal=False):
# No caching, so no refresh needed
return
def snapshots_supported(self):
return False
def get_autostart(self):
return self._backend.autostart
def set_autostart(self, val):
self._backend.autostart = bool(val)
self.emit("config-changed")
def define_name(self, newname):
def change(guest):
guest.name = str(newname)
return self._redefine(change)
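# Illustrative usage sketch (added; 'vm' and 'disk' are hypothetical names).
# The define_* methods above queue edits against the inactive XML through
# _redefine()/_redefine_device(), while the hotplug_* methods act on the
# running guest:
#
#   disk = vm.get_disk_devices()[0]
#   vm.define_disk_cache(disk, "writeback")          # persisted in the inactive XML
#   vm.hotplug_storage_media(disk, "/tmp/new.iso")   # applied to the live guest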
|
gpl-2.0
| 5,290,043,022,076,482,000
| 32.375741
| 80
| 0.56443
| false
| 3.997159
| false
| false
| false
|
ccpgames/eve-metrics
|
web2py/gluon/contrib/fpdf/fpdf.py
|
1
|
73164
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# ****************************************************************************
# * Software: FPDF for python *
# * Version: 1.7.1 *
# * Date: 2010-09-10 *
# * Last update: 2012-08-16 *
# * License: LGPL v3.0 *
# * *
# * Original Author (PHP): Olivier PLATHEY 2004-12-31 *
# * Ported to Python 2.4 by Max (maxpat78@yahoo.it) on 2006-05 *
# * Maintainer: Mariano Reingart (reingart@gmail.com) et al since 2008 est. *
# * NOTE: 'I' and 'D' destinations are disabled, and simply print to STDOUT *
# ****************************************************************************
from datetime import datetime
import math
import errno
import os, sys, zlib, struct, re, tempfile
try:
import cPickle as pickle
except ImportError:
import pickle
# Check if PIL is available (tries importing both pypi version and corrected or manually installed versions).
# Necessary for JPEG and GIF support.
try:
try:
import Image
except:
from PIL import Image
except ImportError:
Image = None
from ttfonts import TTFontFile
from fonts import fpdf_charwidths
from php import substr, sprintf, print_r, UTF8ToUTF16BE, UTF8StringToArray
# Global variables
FPDF_VERSION = '1.7.1'
FPDF_FONT_DIR = os.path.join(os.path.dirname(__file__),'font')
SYSTEM_TTFONTS = None
PY3K = sys.version_info >= (3, 0)
def set_global(var, val):
globals()[var] = val
class FPDF(object):
"PDF Generation class"
def __init__(self, orientation='P',unit='mm',format='A4'):
# Some checks
self._dochecks()
# Initialization of properties
self.offsets={} # array of object offsets
self.page=0 # current page number
self.n=2 # current object number
self.buffer='' # buffer holding in-memory PDF
self.pages={} # array containing pages
self.orientation_changes={} # array indicating orientation changes
self.state=0 # current document state
self.fonts={} # array of used fonts
self.font_files={} # array of font files
self.diffs={} # array of encoding differences
self.images={} # array of used images
self.page_links={} # array of links in pages
self.links={} # array of internal links
self.in_footer=0 # flag set when processing footer
self.lastw=0
self.lasth=0 # height of last cell printed
self.font_family='' # current font family
self.font_style='' # current font style
self.font_size_pt=12 # current font size in points
self.underline=0 # underlining flag
self.draw_color='0 G'
self.fill_color='0 g'
self.text_color='0 g'
self.color_flag=0 # indicates whether fill and text colors are different
self.ws=0 # word spacing
self.angle=0
# Standard fonts
self.core_fonts={'courier':'Courier','courierB':'Courier-Bold','courierI':'Courier-Oblique','courierBI':'Courier-BoldOblique',
'helvetica':'Helvetica','helveticaB':'Helvetica-Bold','helveticaI':'Helvetica-Oblique','helveticaBI':'Helvetica-BoldOblique',
'times':'Times-Roman','timesB':'Times-Bold','timesI':'Times-Italic','timesBI':'Times-BoldItalic',
'symbol':'Symbol','zapfdingbats':'ZapfDingbats'}
# Scale factor
if(unit=='pt'):
self.k=1
elif(unit=='mm'):
self.k=72/25.4
elif(unit=='cm'):
self.k=72/2.54
elif(unit=='in'):
self.k=72
else:
self.error('Incorrect unit: '+unit)
# Page format
if(isinstance(format,basestring)):
format=format.lower()
if(format=='a3'):
format=(841.89,1190.55)
elif(format=='a4'):
format=(595.28,841.89)
elif(format=='a5'):
format=(420.94,595.28)
elif(format=='letter'):
format=(612,792)
elif(format=='legal'):
format=(612,1008)
else:
self.error('Unknown page format: '+format)
self.fw_pt=format[0]
self.fh_pt=format[1]
else:
self.fw_pt=format[0]*self.k
self.fh_pt=format[1]*self.k
self.fw=self.fw_pt/self.k
self.fh=self.fh_pt/self.k
# Page orientation
orientation=orientation.lower()
if(orientation=='p' or orientation=='portrait'):
self.def_orientation='P'
self.w_pt=self.fw_pt
self.h_pt=self.fh_pt
elif(orientation=='l' or orientation=='landscape'):
self.def_orientation='L'
self.w_pt=self.fh_pt
self.h_pt=self.fw_pt
else:
self.error('Incorrect orientation: '+orientation)
self.cur_orientation=self.def_orientation
self.w=self.w_pt/self.k
self.h=self.h_pt/self.k
# Page margins (1 cm)
margin=28.35/self.k
self.set_margins(margin,margin)
# Interior cell margin (1 mm)
self.c_margin=margin/10.0
# line width (0.2 mm)
self.line_width=.567/self.k
# Automatic page break
self.set_auto_page_break(1,2*margin)
# Full width display mode
self.set_display_mode('fullwidth')
# Enable compression
self.set_compression(1)
# Set default PDF version number
self.pdf_version='1.3'
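# Minimal usage sketch (added for illustration; the output filename is
# arbitrary):
#
#   pdf = FPDF()                  # portrait, millimetres, A4 by default
#   pdf.add_page()
#   pdf.set_font('helvetica', 'B', 16)
#   pdf.cell(40, 10, 'Hello World')
#   pdf.output('hello.pdf', 'F')  # 'F' saves to a local file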
def set_margins(self, left,top,right=-1):
"Set left, top and right margins"
self.l_margin=left
self.t_margin=top
if(right==-1):
right=left
self.r_margin=right
def set_left_margin(self, margin):
"Set left margin"
self.l_margin=margin
if(self.page>0 and self.x<margin):
self.x=margin
def set_top_margin(self, margin):
"Set top margin"
self.t_margin=margin
def set_right_margin(self, margin):
"Set right margin"
self.r_margin=margin
def set_auto_page_break(self, auto,margin=0):
"Set auto page break mode and triggering margin"
self.auto_page_break=auto
self.b_margin=margin
self.page_break_trigger=self.h-margin
def set_display_mode(self, zoom,layout='continuous'):
"Set display mode in viewer"
if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)):
self.zoom_mode=zoom
else:
self.error('Incorrect zoom display mode: '+zoom)
if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'):
self.layout_mode=layout
else:
self.error('Incorrect layout display mode: '+layout)
def set_compression(self, compress):
"Set page compression"
self.compress=compress
def set_title(self, title):
"Title of document"
self.title=title
def set_subject(self, subject):
"Subject of document"
self.subject=subject
def set_author(self, author):
"Author of document"
self.author=author
def set_keywords(self, keywords):
"Keywords of document"
self.keywords=keywords
def set_creator(self, creator):
"Creator of document"
self.creator=creator
def alias_nb_pages(self, alias='{nb}'):
"Define an alias for total number of pages"
self.str_alias_nb_pages=alias
return alias
def error(self, msg):
"Fatal error"
raise RuntimeError('FPDF error: '+msg)
def open(self):
"Begin document"
self.state=1
def close(self):
"Terminate document"
if(self.state==3):
return
if(self.page==0):
self.add_page()
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#close document
self._enddoc()
def add_page(self, orientation=''):
"Start a new page"
if(self.state==0):
self.open()
family=self.font_family
if self.underline:
style = self.font_style + 'U'
else:
style = self.font_style
size=self.font_size_pt
lw=self.line_width
dc=self.draw_color
fc=self.fill_color
tc=self.text_color
cf=self.color_flag
if(self.page>0):
#Page footer
self.in_footer=1
self.footer()
self.in_footer=0
#close page
self._endpage()
#Start new page
self._beginpage(orientation)
#Set line cap style to square
self._out('2 J')
#Set line width
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Set font
if(family):
self.set_font(family,style,size)
#Set colors
self.draw_color=dc
if(dc!='0 G'):
self._out(dc)
self.fill_color=fc
if(fc!='0 g'):
self._out(fc)
self.text_color=tc
self.color_flag=cf
#Page header
self.header()
#Restore line width
if(self.line_width!=lw):
self.line_width=lw
self._out(sprintf('%.2f w',lw*self.k))
#Restore font
if(family):
self.set_font(family,style,size)
#Restore colors
if(self.draw_color!=dc):
self.draw_color=dc
self._out(dc)
if(self.fill_color!=fc):
self.fill_color=fc
self._out(fc)
self.text_color=tc
self.color_flag=cf
def header(self):
"Header to be implemented in your own inherited class"
pass
def footer(self):
"Footer to be implemented in your own inherited class"
pass
def page_no(self):
"Get current page number"
return self.page
def set_draw_color(self, r,g=-1,b=-1):
"Set color for all stroking operations"
if((r==0 and g==0 and b==0) or g==-1):
self.draw_color=sprintf('%.3f G',r/255.0)
else:
self.draw_color=sprintf('%.3f %.3f %.3f RG',r/255.0,g/255.0,b/255.0)
if(self.page>0):
self._out(self.draw_color)
def set_fill_color(self,r,g=-1,b=-1):
"Set color for all filling operations"
if((r==0 and g==0 and b==0) or g==-1):
self.fill_color=sprintf('%.3f g',r/255.0)
else:
self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
if(self.page>0):
self._out(self.fill_color)
def set_text_color(self, r,g=-1,b=-1):
"Set color for text"
if((r==0 and g==0 and b==0) or g==-1):
self.text_color=sprintf('%.3f g',r/255.0)
else:
self.text_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
self.color_flag=(self.fill_color!=self.text_color)
def get_string_width(self, s):
"Get width of a string in the current font"
cw=self.current_font['cw']
w=0
l=len(s)
if self.unifontsubset:
for char in s:
char = ord(char)
if len(cw) > char:
w += cw[char] # ord(cw[2*char])<<8 + ord(cw[2*char+1])
#elif (char>0 and char<128 and isset($cw[chr($char)])) { $w += $cw[chr($char)]; }
elif (self.current_font['desc']['MissingWidth']) :
w += self.current_font['desc']['MissingWidth']
#elif (isset($this->CurrentFont['MissingWidth'])) { $w += $this->CurrentFont['MissingWidth']; }
else:
w += 500
else:
for i in xrange(0, l):
w += cw.get(s[i],0)
return w*self.font_size/1000.0
def set_line_width(self, width):
"Set line width"
self.line_width=width
if(self.page>0):
self._out(sprintf('%.2f w',width*self.k))
def line(self, x1,y1,x2,y2):
"Draw a line"
self._out(sprintf('%.2f %.2f m %.2f %.2f l S',x1*self.k,(self.h-y1)*self.k,x2*self.k,(self.h-y2)*self.k))
def _set_dash(self, dash_length=False, space_length=False):
if(dash_length and space_length):
s = sprintf('[%.3f %.3f] 0 d', dash_length*self.k, space_length*self.k)
else:
s = '[] 0 d'
self._out(s)
def dashed_line(self, x1,y1,x2,y2, dash_length=1, space_length=1):
"""Draw a dashed line. Same interface as line() except:
- dash_length: Length of the dash
- space_length: Length of the space between dashes"""
self._set_dash(dash_length, space_length)
self.line(x1, y1, x2, y2)
self._set_dash()
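# Example (added): a dashed horizontal line with 2-unit dashes and 3-unit gaps
# (units follow the 'unit' constructor argument, millimetres by default):
#
#   pdf.dashed_line(10, 30, 200, 30, dash_length=2, space_length=3)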
def rect(self, x,y,w,h,style=''):
"Draw a rectangle"
if(style=='F'):
op='f'
elif(style=='FD' or style=='DF'):
op='B'
else:
op='S'
self._out(sprintf('%.2f %.2f %.2f %.2f re %s',x*self.k,(self.h-y)*self.k,w*self.k,-h*self.k,op))
def add_font(self, family, style='', fname='', uni=False):
"Add a TrueType or Type1 font"
family = family.lower()
if (fname == ''):
fname = family.replace(' ','') + style.lower() + '.pkl'
if (family == 'arial'):
family = 'helvetica'
style = style.upper()
if (style == 'IB'):
style = 'BI'
fontkey = family+style
if fontkey in self.fonts:
# Font already added!
return
if (uni):
global SYSTEM_TTFONTS
if os.path.exists(fname):
ttffilename = fname
elif (FPDF_FONT_DIR and
os.path.exists(os.path.join(FPDF_FONT_DIR, fname))):
ttffilename = os.path.join(FPDF_FONT_DIR, fname)
elif (SYSTEM_TTFONTS and
os.path.exists(os.path.join(SYSTEM_TTFONTS, fname))):
ttffilename = os.path.join(SYSTEM_TTFONTS, fname)
else:
raise RuntimeError("TTF Font file not found: %s" % fname)
unifilename = os.path.splitext(ttffilename)[0] + '.pkl'
name = ''
if os.path.exists(unifilename):
fh = open(unifilename)
try:
font_dict = pickle.load(fh)
finally:
fh.close()
else:
ttf = TTFontFile()
ttf.getMetrics(ttffilename)
desc = {
'Ascent': int(round(ttf.ascent, 0)),
'Descent': int(round(ttf.descent, 0)),
'CapHeight': int(round(ttf.capHeight, 0)),
'Flags': ttf.flags,
'FontBBox': "[%s %s %s %s]" % (
int(round(ttf.bbox[0], 0)),
int(round(ttf.bbox[1], 0)),
int(round(ttf.bbox[2], 0)),
int(round(ttf.bbox[3], 0))),
'ItalicAngle': int(ttf.italicAngle),
'StemV': int(round(ttf.stemV, 0)),
'MissingWidth': int(round(ttf.defaultWidth, 0)),
}
# Generate metrics .pkl file
font_dict = {
'name': re.sub('[ ()]', '', ttf.fullName),
'type': 'TTF',
'desc': desc,
'up': round(ttf.underlinePosition),
'ut': round(ttf.underlineThickness),
'ttffile': ttffilename,
'fontkey': fontkey,
'originalsize': os.stat(ttffilename).st_size,
'cw': ttf.charWidths,
}
try:
fh = open(unifilename, "w")
pickle.dump(font_dict, fh)
fh.close()
except IOError, e:
if not e.errno == errno.EACCES:
raise # Not a permission error.
del ttf
if hasattr(self,'str_alias_nb_pages'):
sbarr = range(0,57) # include numbers in the subset!
else:
sbarr = range(0,32)
self.fonts[fontkey] = {
'i': len(self.fonts)+1, 'type': font_dict['type'],
'name': font_dict['name'], 'desc': font_dict['desc'],
'up': font_dict['up'], 'ut': font_dict['ut'],
'cw': font_dict['cw'],
'ttffile': font_dict['ttffile'], 'fontkey': fontkey,
'subset': sbarr, 'unifilename': unifilename,
}
self.font_files[fontkey] = {'length1': font_dict['originalsize'],
'type': "TTF", 'ttffile': ttffilename}
self.font_files[fname] = {'type': "TTF"}
else:
fontfile = open(fname)
try:
font_dict = pickle.load(fontfile)
finally:
fontfile.close()
self.fonts[fontkey] = {'i': len(self.fonts)+1}
self.fonts[fontkey].update(font_dict)
diff = font_dict.get('diff')   # encoding differences recorded in the metrics pickle, if any
if (diff):
#Search existing encodings
d = 0
nb = len(self.diffs)
for i in xrange(1, nb+1):
if(self.diffs[i] == diff):
d = i
break
if (d == 0):
d = nb + 1
self.diffs[d] = diff
self.fonts[fontkey]['diff'] = d
filename = font_dict.get('filename')
if (filename):
# Pull the file sizes from the metrics pickle (assumed keys, matching
# the ones written for unicode fonts above).
if (font_dict.get('type') == 'TrueType'):
self.font_files[filename]={'length1': font_dict['originalsize']}
else:
self.font_files[filename]={'length1': font_dict['size1'],
'length2': font_dict['size2']}
def set_font(self, family,style='',size=0):
"Select a font; size given in points"
family=family.lower()
if(family==''):
family=self.font_family
if(family=='arial'):
family='helvetica'
elif(family=='symbol' or family=='zapfdingbats'):
style=''
style=style.upper()
if('U' in style):
self.underline=1
style=style.replace('U','')
else:
self.underline=0
if(style=='IB'):
style='BI'
if(size==0):
size=self.font_size_pt
#Test if font is already selected
if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
return
#Test if used for the first time
fontkey=family+style
if fontkey not in self.fonts:
#Check if one of the standard fonts
if fontkey in self.core_fonts:
if fontkey not in fpdf_charwidths:
#Load metric file
name=os.path.join(FPDF_FONT_DIR,family)
if(family=='times' or family=='helvetica'):
name+=style.lower()
execfile(name+'.font')
if fontkey not in fpdf_charwidths:
self.error('Could not include font metric file for '+fontkey)
i=len(self.fonts)+1
self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
else:
self.error('Undefined font: '+family+' '+style)
#Select it
self.font_family=family
self.font_style=style
self.font_size_pt=size
self.font_size=size/self.k
self.current_font=self.fonts[fontkey]
self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
def set_font_size(self, size):
"Set font size in points"
if(self.font_size_pt==size):
return
self.font_size_pt=size
self.font_size=size/self.k
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
def add_link(self):
"Create a new internal link"
n=len(self.links)+1
self.links[n]=(0,0)
return n
def set_link(self, link,y=0,page=-1):
"Set destination of internal link"
if(y==-1):
y=self.y
if(page==-1):
page=self.page
self.links[link]=[page,y]
def link(self, x,y,w,h,link):
"Put a link on the page"
if not self.page in self.page_links:
self.page_links[self.page] = []
self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),]
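# Example (added): internal links pair add_link()/set_link() with either the
# 'link' argument of cell()/image() or an explicit link() rectangle:
#
#   target = pdf.add_link()
#   pdf.cell(0, 10, 'Jump to chapter 2', ln=1, link=target)
#   # ... later, on the destination page ...
#   pdf.add_page()
#   pdf.set_link(target)          # resolves to the top of the current page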
def text(self, x, y, txt=''):
"Output a string"
txt = self.normalize_text(txt)
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2)
if(self.underline and txt!=''):
s+=' '+self._dounderline(x,y,txt)
if(self.color_flag):
s='q '+self.text_color+' '+s+' Q'
self._out(s)
def rotate(self, angle, x=None, y=None):
if x is None:
x = self.x
if y is None:
y = self.y;
if self.angle!=0:
self._out('Q')
self.angle = angle
if angle!=0:
angle *= math.pi/180;
c = math.cos(angle);
s = math.sin(angle);
cx = x*self.k;
cy = (self.h-y)*self.k
s = sprintf('q %.5F %.5F %.5F %.5F %.2F %.2F cm 1 0 0 1 %.2F %.2F cm',c,s,-s,c,cx,cy,-cx,-cy)
self._out(s)
def accept_page_break(self):
"Accept automatic page break or not"
return self.auto_page_break
def cell(self, w,h=0,txt='',border=0,ln=0,align='',fill=0,link=''):
"Output a cell"
txt = self.normalize_text(txt)
k=self.k
if(self.y+h>self.page_break_trigger and not self.in_footer and self.accept_page_break()):
#Automatic page break
x=self.x
ws=self.ws
if(ws>0):
self.ws=0
self._out('0 Tw')
self.add_page(self.cur_orientation)
self.x=x
if(ws>0):
self.ws=ws
self._out(sprintf('%.3f Tw',ws*k))
if(w==0):
w=self.w-self.r_margin-self.x
s=''
if(fill==1 or border==1):
if(fill==1):
if border==1:
op='B'
else:
op='f'
else:
op='S'
s=sprintf('%.2f %.2f %.2f %.2f re %s ',self.x*k,(self.h-self.y)*k,w*k,-h*k,op)
if(isinstance(border,basestring)):
x=self.x
y=self.y
if('L' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,x*k,(self.h-(y+h))*k)
if('T' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,(x+w)*k,(self.h-y)*k)
if('R' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',(x+w)*k,(self.h-y)*k,(x+w)*k,(self.h-(y+h))*k)
if('B' in border):
s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-(y+h))*k,(x+w)*k,(self.h-(y+h))*k)
if(txt!=''):
if(align=='R'):
dx=w-self.c_margin-self.get_string_width(txt)
elif(align=='C'):
dx=(w-self.get_string_width(txt))/2.0
else:
dx=self.c_margin
if(self.color_flag):
s+='q '+self.text_color+' '
# If multibyte, Tw has no effect - do word spacing using an adjustment before each space
if (self.ws and self.unifontsubset):
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
space = self._escape(UTF8ToUTF16BE(' ', False))
s += sprintf('BT 0 Tw %.2F %.2F Td [',(self.x + dx) * k,(self.h - (self.y + 0.5*h+ 0.3 * self.font_size)) * k)
t = txt.split(' ')
numt = len(t)
for i in range(numt):
tx = t[i]
tx = '(' + self._escape(UTF8ToUTF16BE(tx, False)) + ')'
s += sprintf('%s ', tx);
if ((i+1)<numt):
adj = -(self.ws * self.k) * 1000 / self.font_size_pt
s += sprintf('%d(%s) ', adj, space)
s += '] TJ'
s += ' ET'
else:
if (self.unifontsubset):
txt2 = self._escape(UTF8ToUTF16BE(txt, False))
for uni in UTF8StringToArray(txt):
self.current_font['subset'].append(uni)
else:
txt2 = self._escape(txt)
s += sprintf('BT %.2f %.2f Td (%s) Tj ET',(self.x+dx)*k,(self.h-(self.y+.5*h+.3*self.font_size))*k,txt2)
if(self.underline):
s+=' '+self._dounderline(self.x+dx,self.y+.5*h+.3*self.font_size,txt)
if(self.color_flag):
s+=' Q'
if(link):
self.link(self.x+dx,self.y+.5*h-.5*self.font_size,self.get_string_width(txt),self.font_size,link)
if(s):
self._out(s)
self.lasth=h
if(ln>0):
#Go to next line
self.y+=h
if(ln==1):
self.x=self.l_margin
else:
self.x+=w
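# Example (added): a simple two-column header row followed by a data row;
# ln=1 moves to the next line after the last cell of each row:
#
#   pdf.cell(60, 10, 'Name', border=1, ln=0, align='C')
#   pdf.cell(30, 10, 'Age', border=1, ln=1, align='C')
#   pdf.cell(60, 10, 'Alice', border=1, ln=0)
#   pdf.cell(30, 10, '42', border=1, ln=1, align='R')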
def multi_cell(self, w, h, txt='', border=0, align='J', fill=0, split_only=False):
"Output text with automatic or explicit line breaks"
txt = self.normalize_text(txt)
ret = [] # if split_only = True, returns splited text cells
cw=self.current_font['cw']
if(w==0):
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
s=txt.replace("\r",'')
nb=len(s)
if(nb>0 and s[nb-1]=="\n"):
nb-=1
b=0
if(border):
if(border==1):
border='LTRB'
b='LRT'
b2='LR'
else:
b2=''
if('L' in border):
b2+='L'
if('R' in border):
b2+='R'
if ('T' in border):
b=b2+'T'
else:
b=b2
sep=-1
i=0
j=0
l=0
ns=0
nl=1
while(i<nb):
#Get next character
c=s[i]
if(c=="\n"):
#Explicit line break
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
else:
ret.append(substr(s,j,i-j))
i+=1
sep=-1
j=i
l=0
ns=0
nl+=1
if(border and nl==2):
b=b2
continue
if(c==' '):
sep=i
ls=l
ns+=1
if self.unifontsubset:
l += self.get_string_width(c) / self.font_size*1000.0
else:
l += cw.get(c,0)
if(l>wmax):
#Automatic line break
if(sep==-1):
if(i==j):
i+=1
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
else:
ret.append(substr(s,j,i-j))
else:
if(align=='J'):
if ns>1:
self.ws=(wmax-ls)/1000.0*self.font_size/(ns-1)
else:
self.ws=0
if not split_only:
self._out(sprintf('%.3f Tw',self.ws*self.k))
if not split_only:
self.cell(w,h,substr(s,j,sep-j),b,2,align,fill)
else:
ret.append(substr(s,j,sep-j))
i=sep+1
sep=-1
j=i
l=0
ns=0
nl+=1
if(border and nl==2):
b=b2
else:
i+=1
#Last chunk
if(self.ws>0):
self.ws=0
if not split_only:
self._out('0 Tw')
if(border and 'B' in border):
b+='B'
if not split_only:
self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
self.x=self.l_margin
else:
ret.append(substr(s,j,i-j))
return ret
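# Example (added; 'long_paragraph' is a placeholder string): multi_cell()
# wraps text to the cell width, and with split_only=True it only returns the
# wrapped lines without drawing anything:
#
#   pdf.multi_cell(60, 5, long_paragraph, border=1, align='J')
#   lines = pdf.multi_cell(60, 5, long_paragraph, split_only=True)
#   row_height = 5 * len(lines)   # e.g. to size a table row ahead of time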
def write(self, h, txt='', link=''):
"Output text in flowing mode"
txt = self.normalize_text(txt)
cw=self.current_font['cw']
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
s=txt.replace("\r",'')
nb=len(s)
sep=-1
i=0
j=0
l=0
nl=1
while(i<nb):
#Get next character
c=s[i]
if(c=="\n"):
#Explicit line break
self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
i+=1
sep=-1
j=i
l=0
if(nl==1):
self.x=self.l_margin
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
nl+=1
continue
if(c==' '):
sep=i
if self.unifontsubset:
l += self.get_string_width(c) / self.font_size*1000.0
else:
l += cw.get(c,0)
if(l>wmax):
#Automatic line break
if(sep==-1):
if(self.x>self.l_margin):
#Move to next line
self.x=self.l_margin
self.y+=h
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
i+=1
nl+=1
continue
if(i==j):
i+=1
self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
else:
self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link)
i=sep+1
sep=-1
j=i
l=0
if(nl==1):
self.x=self.l_margin
w=self.w-self.r_margin-self.x
wmax=(w-2*self.c_margin)*1000.0/self.font_size
nl+=1
else:
i+=1
#Last chunk
if(i!=j):
self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
def image(self, name, x=None, y=None, w=0,h=0,type='',link=''):
"Put an image on the page"
if not name in self.images:
#First use of image, get info
if(type==''):
pos=name.rfind('.')
if(pos == -1):
self.error('image file has no extension and no type was specified: '+name)
type=substr(name,pos+1)
type=type.lower()
if(type=='jpg' or type=='jpeg'):
info=self._parsejpg(name)
elif(type=='png'):
info=self._parsepng(name)
else:
#Allow for additional formats
#maybe the image is not showing the correct extension,
#but the header is OK,
succeed_parsing = False
#try all the parsing functions
parsing_functions = [self._parsejpg,self._parsepng,self._parsegif]
for pf in parsing_functions:
try:
info = pf(name)
succeed_parsing = True
break;
except:
pass
#last resort
if not succeed_parsing:
mtd='_parse'+type
if not hasattr(self,mtd):
self.error('Unsupported image type: '+type)
info=getattr(self, mtd)(name)
mtd='_parse'+type
if not hasattr(self,mtd):
self.error('Unsupported image type: '+type)
info=getattr(self, mtd)(name)
info['i']=len(self.images)+1
self.images[name]=info
else:
info=self.images[name]
#Automatic width and height calculation if needed
if(w==0 and h==0):
#Put image at 72 dpi
w=info['w']/self.k
h=info['h']/self.k
elif(w==0):
w=h*info['w']/info['h']
elif(h==0):
h=w*info['h']/info['w']
# Flowing mode
if y is None:
if (self.y + h > self.page_break_trigger and not self.in_footer and self.accept_page_break()):
#Automatic page break
x = self.x
self.add_page(self.cur_orientation)
self.x = x
y = self.y
self.y += h
if x is None:
x = self.x
self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',w*self.k,h*self.k,x*self.k,(self.h-(y+h))*self.k,info['i']))
if(link):
self.link(x,y,w,h,link)
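# Example (added; 'logo.png' is a placeholder path): place an image 30 units
# wide at (10, 8); when h is omitted it is derived from the aspect ratio:
#
#   pdf.image('logo.png', x=10, y=8, w=30)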
def ln(self, h=''):
"Line Feed; default value is last cell height"
self.x=self.l_margin
if(isinstance(h, basestring)):
self.y+=self.lasth
else:
self.y+=h
def get_x(self):
"Get x position"
return self.x
def set_x(self, x):
"Set x position"
if(x>=0):
self.x=x
else:
self.x=self.w+x
def get_y(self):
"Get y position"
return self.y
def set_y(self, y):
"Set y position and reset x"
self.x=self.l_margin
if(y>=0):
self.y=y
else:
self.y=self.h+y
def set_xy(self, x,y):
"Set x and y positions"
self.set_y(y)
self.set_x(x)
def output(self, name='',dest=''):
"Output PDF to some destination"
#Finish document if necessary
if(self.state<3):
self.close()
dest=dest.upper()
if(dest==''):
if(name==''):
name='doc.pdf'
dest='I'
else:
dest='F'
if dest=='I':
print self.buffer
elif dest=='D':
print self.buffer
elif dest=='F':
#Save to local file
f=open(name,'wb')
if(not f):
self.error('Unable to create output file: '+name)
if PY3K:
# TODO: proper unicode support
f.write(self.buffer.encode("latin1"))
else:
f.write(self.buffer)
f.close()
elif dest=='S':
#Return as a string
return self.buffer
else:
self.error('Incorrect output destination: '+dest)
return ''
def normalize_text(self, txt):
"Check that text input is in the correct format/encoding"
# - for TTF unicode fonts: unicode object (utf8 encoding)
# - for built-in fonts: string instances (latin 1 encoding)
if self.unifontsubset and isinstance(txt, str):
txt = txt.decode('utf8')
elif not self.unifontsubset and isinstance(txt, unicode) and not PY3K:
txt = txt.encode('latin1')
return txt
def _dochecks(self):
#Check for locale-related bug
# if(1.1==1):
# self.error("Don\'t alter the locale before including class file");
#Check for decimal separator
if(sprintf('%.1f',1.0)!='1.0'):
import locale
locale.setlocale(locale.LC_NUMERIC,'C')
def _getfontpath(self):
return FPDF_FONT_DIR+'/'
def _putpages(self):
nb=self.page
if hasattr(self,'str_alias_nb_pages'):
# Replace number of pages in fonts using subsets (unicode)
alias = UTF8ToUTF16BE(self.str_alias_nb_pages, False);
r = UTF8ToUTF16BE(str(nb), False)
for n in xrange(1, nb+1):
self.pages[n] = self.pages[n].replace(alias, r)
# Now repeat for the number of pages in non-subset fonts
for n in xrange(1,nb+1):
self.pages[n]=self.pages[n].replace(self.str_alias_nb_pages,str(nb))
if(self.def_orientation=='P'):
w_pt=self.fw_pt
h_pt=self.fh_pt
else:
w_pt=self.fh_pt
h_pt=self.fw_pt
if self.compress:
filter='/Filter /FlateDecode '
else:
filter=''
for n in xrange(1,nb+1):
#Page
self._newobj()
self._out('<</Type /Page')
self._out('/Parent 1 0 R')
if n in self.orientation_changes:
self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',h_pt,w_pt))
self._out('/Resources 2 0 R')
if self.page_links and n in self.page_links:
#Links
annots='/Annots ['
for pl in self.page_links[n]:
rect=sprintf('%.2f %.2f %.2f %.2f',pl[0],pl[1],pl[0]+pl[2],pl[1]-pl[3])
annots+='<</Type /Annot /Subtype /Link /Rect ['+rect+'] /Border [0 0 0] '
if(isinstance(pl[4],basestring)):
annots+='/A <</S /URI /URI '+self._textstring(pl[4])+'>>>>'
else:
l=self.links[pl[4]]
if l[0] in self.orientation_changes:
h=w_pt
else:
h=h_pt
annots+=sprintf('/Dest [%d 0 R /XYZ 0 %.2f null]>>',1+2*l[0],h-l[1]*self.k)
self._out(annots+']')
if(self.pdf_version>'1.3'):
self._out('/Group <</Type /Group /S /Transparency /CS /DeviceRGB>>')
self._out('/Contents '+str(self.n+1)+' 0 R>>')
self._out('endobj')
#Page content
if self.compress:
p = zlib.compress(self.pages[n])
else:
p = self.pages[n]
self._newobj()
self._out('<<'+filter+'/Length '+str(len(p))+'>>')
self._putstream(p)
self._out('endobj')
#Pages root
self.offsets[1]=len(self.buffer)
self._out('1 0 obj')
self._out('<</Type /Pages')
kids='/Kids ['
for i in xrange(0,nb):
kids+=str(3+2*i)+' 0 R '
self._out(kids+']')
self._out('/Count '+str(nb))
self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',w_pt,h_pt))
self._out('>>')
self._out('endobj')
def _putfonts(self):
nf=self.n
for diff in self.diffs:
#Encodings
self._newobj()
self._out('<</Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences ['+self.diffs[diff]+']>>')
self._out('endobj')
for name,info in self.font_files.iteritems():
if 'type' in info and info['type'] != 'TTF':
#Font file embedding
self._newobj()
self.font_files[name]['n']=self.n
font=''
f=open(self._getfontpath()+name,'rb',1)
if(not f):
self.error('Font file not found')
font=f.read()
f.close()
compressed=(substr(name,-2)=='.z')
if(not compressed and 'length2' in info):
header=(ord(font[0])==128)
if(header):
#Strip first binary header
font=substr(font,6)
if(header and ord(font[info['length1']])==128):
#Strip second binary header
font=substr(font,0,info['length1'])+substr(font,info['length1']+6)
self._out('<</Length '+str(len(font)))
if(compressed):
self._out('/Filter /FlateDecode')
self._out('/Length1 '+str(info['length1']))
if('length2' in info):
self._out('/Length2 '+str(info['length2'])+' /Length3 0')
self._out('>>')
self._putstream(font)
self._out('endobj')
for k,font in self.fonts.iteritems():
#Font objects
self.fonts[k]['n']=self.n+1
type=font['type']
name=font['name']
if(type=='core'):
#Standard font
self._newobj()
self._out('<</Type /Font')
self._out('/BaseFont /'+name)
self._out('/Subtype /Type1')
if(name!='Symbol' and name!='ZapfDingbats'):
self._out('/Encoding /WinAnsiEncoding')
self._out('>>')
self._out('endobj')
elif(type=='Type1' or type=='TrueType'):
#Additional Type1 or TrueType font
self._newobj()
self._out('<</Type /Font')
self._out('/BaseFont /'+name)
self._out('/Subtype /'+type)
self._out('/FirstChar 32 /LastChar 255')
self._out('/Widths '+str(self.n+1)+' 0 R')
self._out('/FontDescriptor '+str(self.n+2)+' 0 R')
if(font['enc']):
if('diff' in font):
self._out('/Encoding '+str(nf+font['diff'])+' 0 R')
else:
self._out('/Encoding /WinAnsiEncoding')
self._out('>>')
self._out('endobj')
#Widths
self._newobj()
cw=font['cw']
s='['
for i in xrange(32,256):
                    # dict.get() doesn't raise; the "or 0" maps a missing width (None) to 0
s+=str(cw.get(chr(i)) or 0)+' '
self._out(s+']')
self._out('endobj')
#Descriptor
self._newobj()
s='<</Type /FontDescriptor /FontName /'+name
                for k in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
s += ' /%s %s' % (k, font['desc'][k])
filename=font['file']
if(filename):
s+=' /FontFile'
if type!='Type1':
s+='2'
s+=' '+str(self.font_files[filename]['n'])+' 0 R'
self._out(s+'>>')
self._out('endobj')
elif (type == 'TTF'):
self.fonts[k]['n'] = self.n + 1
ttf = TTFontFile()
fontname = 'MPDFAA' + '+' + font['name']
subset = font['subset']
del subset[0]
ttfontstream = ttf.makeSubset(font['ttffile'], subset)
ttfontsize = len(ttfontstream)
fontstream = zlib.compress(ttfontstream)
codeToGlyph = ttf.codeToGlyph
##del codeToGlyph[0]
# Type0 Font
# A composite font - a font composed of other fonts, organized hierarchically
self._newobj()
self._out('<</Type /Font');
self._out('/Subtype /Type0');
self._out('/BaseFont /' + fontname + '');
self._out('/Encoding /Identity-H');
self._out('/DescendantFonts [' + str(self.n + 1) + ' 0 R]')
self._out('/ToUnicode ' + str(self.n + 2) + ' 0 R')
self._out('>>')
self._out('endobj')
# CIDFontType2
# A CIDFont whose glyph descriptions are based on TrueType font technology
self._newobj()
self._out('<</Type /Font')
self._out('/Subtype /CIDFontType2')
self._out('/BaseFont /' + fontname + '')
self._out('/CIDSystemInfo ' + str(self.n + 2) + ' 0 R')
self._out('/FontDescriptor ' + str(self.n + 3) + ' 0 R')
if (font['desc'].get('MissingWidth')):
self._out('/DW %d' % font['desc']['MissingWidth'])
self._putTTfontwidths(font, ttf.maxUni)
self._out('/CIDToGIDMap ' + str(self.n + 4) + ' 0 R')
self._out('>>')
self._out('endobj')
# ToUnicode
self._newobj()
toUni = "/CIDInit /ProcSet findresource begin\n" \
"12 dict begin\n" \
"begincmap\n" \
"/CIDSystemInfo\n" \
"<</Registry (Adobe)\n" \
"/Ordering (UCS)\n" \
"/Supplement 0\n" \
">> def\n" \
"/CMapName /Adobe-Identity-UCS def\n" \
"/CMapType 2 def\n" \
"1 begincodespacerange\n" \
"<0000> <FFFF>\n" \
"endcodespacerange\n" \
"1 beginbfrange\n" \
"<0000> <FFFF> <0000>\n" \
"endbfrange\n" \
"endcmap\n" \
"CMapName currentdict /CMap defineresource pop\n" \
"end\n" \
"end"
self._out('<</Length ' + str(len(toUni)) + '>>')
self._putstream(toUni)
self._out('endobj')
# CIDSystemInfo dictionary
self._newobj()
self._out('<</Registry (Adobe)')
self._out('/Ordering (UCS)')
self._out('/Supplement 0')
self._out('>>')
self._out('endobj')
# Font descriptor
self._newobj()
self._out('<</Type /FontDescriptor')
self._out('/FontName /' + fontname)
for kd in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
v = font['desc'][kd]
if (kd == 'Flags'):
v = v | 4;
v = v & ~32; # SYMBOLIC font flag
self._out(' /%s %s' % (kd, v))
self._out('/FontFile2 ' + str(self.n + 2) + ' 0 R')
self._out('>>')
self._out('endobj')
# Embed CIDToGIDMap
# A specification of the mapping from CIDs to glyph indices
                cidtogidmap = ["\x00"] * 256*256*2
for cc, glyph in codeToGlyph.items():
cidtogidmap[cc*2] = chr(glyph >> 8)
cidtogidmap[cc*2 + 1] = chr(glyph & 0xFF)
cidtogidmap = zlib.compress(''.join(cidtogidmap));
self._newobj()
self._out('<</Length ' + str(len(cidtogidmap)) + '')
self._out('/Filter /FlateDecode')
self._out('>>')
self._putstream(cidtogidmap)
self._out('endobj')
#Font file
self._newobj()
self._out('<</Length ' + str(len(fontstream)))
self._out('/Filter /FlateDecode')
self._out('/Length1 ' + str(ttfontsize))
self._out('>>')
self._putstream(fontstream)
self._out('endobj')
del ttf
else:
#Allow for additional types
mtd='_put'+type.lower()
if(not method_exists(self,mtd)):
self.error('Unsupported font type: '+type)
                getattr(self, mtd)(font)
def _putTTfontwidths(self, font, maxUni):
cw127fname = os.path.splitext(font['unifilename'])[0] + '.cw127.pkl'
if (os.path.exists(cw127fname)):
fh = open(cw127fname);
try:
font_dict = pickle.load(fh)
finally:
fh.close()
rangeid = font_dict['rangeid']
range_ = font_dict['range']
prevcid = font_dict['prevcid']
prevwidth = font_dict['prevwidth']
interval = font_dict['interval']
range_interval = font_dict['range_interval']
startcid = 128
else:
rangeid = 0
range_ = {}
range_interval = {}
prevcid = -2
prevwidth = -1
interval = False
startcid = 1
cwlen = maxUni + 1
# for each character
for cid in range(startcid, cwlen):
if (cid==128 and not os.path.exists(cw127fname)):
try:
fh = open(cw127fname, "wb")
font_dict = {}
font_dict['rangeid'] = rangeid
font_dict['prevcid'] = prevcid
font_dict['prevwidth'] = prevwidth
font_dict['interval'] = interval
font_dict['range_interval'] = range_interval
font_dict['range'] = range_
pickle.dump(font_dict, fh)
fh.close()
except IOError, e:
if not e.errno == errno.EACCES:
raise # Not a permission error.
if (font['cw'][cid] == 0):
continue
width = font['cw'][cid]
if (width == 65535): width = 0
if (cid > 255 and (cid not in font['subset']) or not cid): #
continue
if ('dw' not in font or (font['dw'] and width != font['dw'])):
if (cid == (prevcid + 1)):
if (width == prevwidth):
if (width == range_[rangeid][0]):
range_.setdefault(rangeid, []).append(width)
else:
range_[rangeid].pop()
# new range
rangeid = prevcid
range_[rangeid] = [prevwidth, width]
interval = True
range_interval[rangeid] = True
else:
if (interval):
# new range
rangeid = cid
range_[rangeid] = [width]
else:
range_[rangeid].append(width)
interval = False
else:
rangeid = cid
range_[rangeid] = [width]
interval = False
prevcid = cid
prevwidth = width
prevk = -1
nextk = -1
prevint = False
for k, ws in sorted(range_.items()):
cws = len(ws)
if (k == nextk and not prevint and (not k in range_interval or cws < 3)):
if (k in range_interval):
del range_interval[k]
range_[prevk] = range_[prevk] + range_[k]
del range_[k]
else:
prevk = k
nextk = k + cws
if (k in range_interval):
prevint = (cws > 3)
del range_interval[k]
nextk -= 1
else:
prevint = False
w = []
for k, ws in sorted(range_.items()):
if (len(set(ws)) == 1):
w.append(' %s %s %s' % (k, k + len(ws) - 1, ws[0]))
else:
w.append(' %s [ %s ]\n' % (k, ' '.join([str(int(h)) for h in ws]))) ##
self._out('/W [%s]' % ''.join(w))
def _putimages(self):
filter=''
if self.compress:
filter='/Filter /FlateDecode '
for filename,info in self.images.iteritems():
self._putimage(info)
del info['data']
if 'smask' in info:
del info['smask']
def _putimage(self, info):
if 'data' in info:
self._newobj()
info['n']=self.n
self._out('<</Type /XObject')
self._out('/Subtype /Image')
self._out('/Width '+str(info['w']))
self._out('/Height '+str(info['h']))
if(info['cs']=='Indexed'):
self._out('/ColorSpace [/Indexed /DeviceRGB '+str(len(info['pal'])/3-1)+' '+str(self.n+1)+' 0 R]')
else:
self._out('/ColorSpace /'+info['cs'])
if(info['cs']=='DeviceCMYK'):
self._out('/Decode [1 0 1 0 1 0 1 0]')
self._out('/BitsPerComponent '+str(info['bpc']))
if 'f' in info:
self._out('/Filter /'+info['f'])
if 'dp' in info:
self._out('/DecodeParms <<' + info['dp'] + '>>')
if('trns' in info and isinstance(info['trns'], list)):
trns=''
for i in xrange(0,len(info['trns'])):
trns+=str(info['trns'][i])+' '+str(info['trns'][i])+' '
self._out('/Mask ['+trns+']')
if('smask' in info):
self._out('/SMask ' + str(self.n+1) + ' 0 R');
self._out('/Length '+str(len(info['data']))+'>>')
self._putstream(info['data'])
self._out('endobj')
# Soft mask
if('smask' in info):
dp = '/Predictor 15 /Colors 1 /BitsPerComponent 8 /Columns ' + str(info['w'])
smask = {'w': info['w'], 'h': info['h'], 'cs': 'DeviceGray', 'bpc': 8, 'f': info['f'], 'dp': dp, 'data': info['smask']}
self._putimage(smask)
#Palette
if(info['cs']=='Indexed'):
self._newobj()
filter = self.compress and '/Filter /FlateDecode ' or ''
if self.compress:
pal=zlib.compress(info['pal'])
else:
pal=info['pal']
self._out('<<'+filter+'/Length '+str(len(pal))+'>>')
self._putstream(pal)
self._out('endobj')
def _putxobjectdict(self):
for image in self.images.values():
self._out('/I'+str(image['i'])+' '+str(image['n'])+' 0 R')
def _putresourcedict(self):
self._out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
self._out('/Font <<')
for font in self.fonts.values():
self._out('/F'+str(font['i'])+' '+str(font['n'])+' 0 R')
self._out('>>')
self._out('/XObject <<')
self._putxobjectdict()
self._out('>>')
def _putresources(self):
self._putfonts()
self._putimages()
#Resource dictionary
self.offsets[2]=len(self.buffer)
self._out('2 0 obj')
self._out('<<')
self._putresourcedict()
self._out('>>')
self._out('endobj')
def _putinfo(self):
self._out('/Producer '+self._textstring('PyFPDF '+FPDF_VERSION+' http://pyfpdf.googlecode.com/'))
if hasattr(self,'title'):
self._out('/Title '+self._textstring(self.title))
if hasattr(self,'subject'):
self._out('/Subject '+self._textstring(self.subject))
if hasattr(self,'author'):
self._out('/Author '+self._textstring(self.author))
if hasattr (self,'keywords'):
self._out('/Keywords '+self._textstring(self.keywords))
if hasattr(self,'creator'):
self._out('/Creator '+self._textstring(self.creator))
self._out('/CreationDate '+self._textstring('D:'+datetime.now().strftime('%Y%m%d%H%M%S')))
def _putcatalog(self):
self._out('/Type /Catalog')
self._out('/Pages 1 0 R')
if(self.zoom_mode=='fullpage'):
self._out('/OpenAction [3 0 R /Fit]')
elif(self.zoom_mode=='fullwidth'):
self._out('/OpenAction [3 0 R /FitH null]')
elif(self.zoom_mode=='real'):
self._out('/OpenAction [3 0 R /XYZ null null 1]')
elif(not isinstance(self.zoom_mode,basestring)):
self._out('/OpenAction [3 0 R /XYZ null null '+(self.zoom_mode/100)+']')
if(self.layout_mode=='single'):
self._out('/PageLayout /SinglePage')
elif(self.layout_mode=='continuous'):
self._out('/PageLayout /OneColumn')
elif(self.layout_mode=='two'):
self._out('/PageLayout /TwoColumnLeft')
def _putheader(self):
self._out('%PDF-'+self.pdf_version)
def _puttrailer(self):
self._out('/Size '+str(self.n+1))
self._out('/Root '+str(self.n)+' 0 R')
self._out('/Info '+str(self.n-1)+' 0 R')
def _enddoc(self):
self._putheader()
self._putpages()
self._putresources()
#Info
self._newobj()
self._out('<<')
self._putinfo()
self._out('>>')
self._out('endobj')
#Catalog
self._newobj()
self._out('<<')
self._putcatalog()
self._out('>>')
self._out('endobj')
#Cross-ref
o=len(self.buffer)
self._out('xref')
self._out('0 '+(str(self.n+1)))
self._out('0000000000 65535 f ')
for i in xrange(1,self.n+1):
self._out(sprintf('%010d 00000 n ',self.offsets[i]))
#Trailer
self._out('trailer')
self._out('<<')
self._puttrailer()
self._out('>>')
self._out('startxref')
self._out(o)
self._out('%%EOF')
self.state=3
def _beginpage(self, orientation):
self.page+=1
self.pages[self.page]=''
self.state=2
self.x=self.l_margin
self.y=self.t_margin
self.font_family=''
#Page orientation
if(not orientation):
orientation=self.def_orientation
else:
orientation=orientation[0].upper()
if(orientation!=self.def_orientation):
self.orientation_changes[self.page]=1
if(orientation!=self.cur_orientation):
#Change orientation
if(orientation=='P'):
self.w_pt=self.fw_pt
self.h_pt=self.fh_pt
self.w=self.fw
self.h=self.fh
else:
self.w_pt=self.fh_pt
self.h_pt=self.fw_pt
self.w=self.fh
self.h=self.fw
self.page_break_trigger=self.h-self.b_margin
self.cur_orientation=orientation
def _endpage(self):
#End of page contents
self.state=1
def _newobj(self):
#Begin a new object
self.n+=1
self.offsets[self.n]=len(self.buffer)
self._out(str(self.n)+' 0 obj')
def _dounderline(self, x,y,txt):
#Underline text
up=self.current_font['up']
ut=self.current_font['ut']
w=self.get_string_width(txt)+self.ws*txt.count(' ')
return sprintf('%.2f %.2f %.2f %.2f re f',x*self.k,(self.h-(y-up/1000.0*self.font_size))*self.k,w*self.k,-ut/1000.0*self.font_size_pt)
def _parsejpg(self, filename):
# Extract info from a JPEG file
if Image is None:
self.error('PIL not installed')
try:
f = open(filename, 'rb')
im = Image.open(f)
except Exception, e:
self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
else:
a = im.size
            # JPEG data is normally 8 bits per component, so default bpc to 8 just in case
bpc=8
if im.mode == 'RGB':
colspace='DeviceRGB'
elif im.mode == 'CMYK':
colspace='DeviceCMYK'
else:
colspace='DeviceGray'
# Read whole file from the start
f.seek(0)
data = f.read()
f.close()
return {'w':a[0],'h':a[1],'cs':colspace,'bpc':bpc,'f':'DCTDecode','data':data}
def _parsegif(self, filename):
# Extract info from a GIF file (via PNG conversion)
if Image is None:
self.error('PIL is required for GIF support')
try:
im = Image.open(filename)
except Exception, e:
self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
else:
# Use temporary file
f = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
tmp = f.name
f.close()
if "transparency" in im.info:
im.save(tmp, transparency = im.info['transparency'])
else:
im.save(tmp)
info = self._parsepng(tmp)
os.unlink(tmp)
return info
def _parsepng(self, name):
#Extract info from a PNG file
if name.startswith("http://") or name.startswith("https://"):
import urllib
f = urllib.urlopen(name)
else:
f=open(name,'rb')
if(not f):
self.error("Can't open image file: "+name)
#Check signature
if(f.read(8)!='\x89'+'PNG'+'\r'+'\n'+'\x1a'+'\n'):
self.error('Not a PNG file: '+name)
#Read header chunk
f.read(4)
if(f.read(4)!='IHDR'):
self.error('Incorrect PNG file: '+name)
w=self._freadint(f)
h=self._freadint(f)
bpc=ord(f.read(1))
if(bpc>8):
self.error('16-bit depth not supported: '+name)
ct=ord(f.read(1))
if(ct==0 or ct==4):
colspace='DeviceGray'
elif(ct==2 or ct==6):
colspace='DeviceRGB'
elif(ct==3):
colspace='Indexed'
else:
self.error('Unknown color type: '+name)
if(ord(f.read(1))!=0):
self.error('Unknown compression method: '+name)
if(ord(f.read(1))!=0):
self.error('Unknown filter method: '+name)
if(ord(f.read(1))!=0):
self.error('Interlacing not supported: '+name)
f.read(4)
dp='/Predictor 15 /Colors '
if colspace == 'DeviceRGB':
dp+='3'
else:
dp+='1'
dp+=' /BitsPerComponent '+str(bpc)+' /Columns '+str(w)+''
#Scan chunks looking for palette, transparency and image data
pal=''
trns=''
data=''
n=1
while n != None:
n=self._freadint(f)
type=f.read(4)
if(type=='PLTE'):
#Read palette
pal=f.read(n)
f.read(4)
elif(type=='tRNS'):
#Read transparency info
t=f.read(n)
if(ct==0):
trns=[ord(substr(t,1,1)),]
elif(ct==2):
trns=[ord(substr(t,1,1)),ord(substr(t,3,1)),ord(substr(t,5,1))]
else:
pos=t.find('\x00')
if(pos!=-1):
trns=[pos,]
f.read(4)
elif(type=='IDAT'):
#Read image data block
data+=f.read(n)
f.read(4)
elif(type=='IEND'):
break
else:
f.read(n+4)
if(colspace=='Indexed' and not pal):
self.error('Missing palette in '+name)
f.close()
info = {'w':w,'h':h,'cs':colspace,'bpc':bpc,'f':'FlateDecode','dp':dp,'pal':pal,'trns':trns,}
if(ct>=4):
# Extract alpha channel
data = zlib.decompress(data)
color = '';
alpha = '';
if(ct==4):
# Gray image
length = 2*w
for i in range(h):
pos = (1+length)*i
color += data[pos]
alpha += data[pos]
line = substr(data, pos+1, length)
color += re.sub('(.).',lambda m: m.group(1),line, flags=re.DOTALL)
alpha += re.sub('.(.)',lambda m: m.group(1),line, flags=re.DOTALL)
else:
# RGB image
length = 4*w
for i in range(h):
pos = (1+length)*i
color += data[pos]
alpha += data[pos]
line = substr(data, pos+1, length)
color += re.sub('(.{3}).',lambda m: m.group(1),line, flags=re.DOTALL)
alpha += re.sub('.{3}(.)',lambda m: m.group(1),line, flags=re.DOTALL)
del data
data = zlib.compress(color)
info['smask'] = zlib.compress(alpha)
if (self.pdf_version < '1.4'):
self.pdf_version = '1.4'
info['data'] = data
return info
def _freadint(self, f):
#Read a 4-byte integer from file
try:
return struct.unpack('>I', f.read(4))[0]
except:
return None
def _textstring(self, s):
#Format a text string
return '('+self._escape(s)+')'
def _escape(self, s):
#Add \ before \, ( and )
return s.replace('\\','\\\\').replace(')','\\)').replace('(','\\(').replace('\r','\\r')
def _putstream(self, s):
self._out('stream')
self._out(s)
self._out('endstream')
def _out(self, s):
#Add a line to the document
if(self.state==2):
self.pages[self.page]+=s+"\n"
else:
self.buffer+=str(s)+"\n"
def interleaved2of5(self, txt, x, y, w=1.0, h=10.0):
"Barcode I2of5 (numeric), adds a 0 if odd lenght"
narrow = w / 3.0
wide = w
# wide/narrow codes for the digits
bar_char={'0': 'nnwwn', '1': 'wnnnw', '2': 'nwnnw', '3': 'wwnnn',
'4': 'nnwnw', '5': 'wnwnn', '6': 'nwwnn', '7': 'nnnww',
'8': 'wnnwn', '9': 'nwnwn', 'A': 'nn', 'Z': 'wn'}
self.set_fill_color(0)
code = txt
# add leading zero if code-length is odd
if len(code) % 2 != 0:
code = '0' + code
# add start and stop codes
code = 'AA' + code.lower() + 'ZA'
for i in xrange(0, len(code), 2):
# choose next pair of digits
char_bar = code[i]
char_space = code[i+1]
# check whether it is a valid digit
if not char_bar in bar_char.keys():
raise RuntimeError ('Char "%s" invalid for I25: ' % char_bar)
if not char_space in bar_char.keys():
raise RuntimeError ('Char "%s" invalid for I25: ' % char_space)
# create a wide/narrow-seq (first digit=bars, second digit=spaces)
seq = ''
for s in xrange(0, len(bar_char[char_bar])):
seq += bar_char[char_bar][s] + bar_char[char_space][s]
for bar in xrange(0, len(seq)):
# set line_width depending on value
if seq[bar] == 'n':
line_width = narrow
else:
line_width = wide
# draw every second value, the other is represented by space
if bar % 2 == 0:
self.rect(x, y, line_width, h, 'F')
x += line_width
def code39(self, txt, x, y, w=1.5, h=5.0):
"Barcode 3of9"
wide = w
narrow = w / 3.0
gap = narrow
bar_char={'0': 'nnnwwnwnn', '1': 'wnnwnnnnw', '2': 'nnwwnnnnw',
'3': 'wnwwnnnnn', '4': 'nnnwwnnnw', '5': 'wnnwwnnnn',
'6': 'nnwwwnnnn', '7': 'nnnwnnwnw', '8': 'wnnwnnwnn',
'9': 'nnwwnnwnn', 'A': 'wnnnnwnnw', 'B': 'nnwnnwnnw',
'C': 'wnwnnwnnn', 'D': 'nnnnwwnnw', 'E': 'wnnnwwnnn',
'F': 'nnwnwwnnn', 'G': 'nnnnnwwnw', 'H': 'wnnnnwwnn',
'I': 'nnwnnwwnn', 'J': 'nnnnwwwnn', 'K': 'wnnnnnnww',
'L': 'nnwnnnnww', 'M': 'wnwnnnnwn', 'N': 'nnnnwnnww',
'O': 'wnnnwnnwn', 'P': 'nnwnwnnwn', 'Q': 'nnnnnnwww',
'R': 'wnnnnnwwn', 'S': 'nnwnnnwwn', 'T': 'nnnnwnwwn',
'U': 'wwnnnnnnw', 'V': 'nwwnnnnnw', 'W': 'wwwnnnnnn',
'X': 'nwnnwnnnw', 'Y': 'wwnnwnnnn', 'Z': 'nwwnwnnnn',
'-': 'nwnnnnwnw', '.': 'wwnnnnwnn', ' ': 'nwwnnnwnn',
'*': 'nwnnwnwnn', '$': 'nwnwnwnnn', '/': 'nwnwnnnwn',
'+': 'nwnnnwnwn', '%': 'nnnwnwnwn'}
self.set_fill_color(0)
code = txt
code = code.upper()
        for i in xrange(0, len(code)):
char_bar = code[i]
if not char_bar in bar_char.keys():
raise RuntimeError ('Char "%s" invalid for Code39' % char_bar)
seq= ''
for s in xrange(0, len(bar_char[char_bar])):
seq += bar_char[char_bar][s]
for bar in xrange(0, len(seq)):
if seq[bar] == 'n':
line_width = narrow
else:
line_width = wide
if bar % 2 == 0:
self.rect(x, y, line_width, h, 'F')
x += line_width
x += gap
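# Illustrative barcode usage sketch (assumes the enclosing class is FPDF, as in
# pyfpdf; the values below are made up):
#   pdf = FPDF()
#   pdf.add_page()
#   pdf.interleaved2of5('0123456789', x=10, y=10, w=1.0, h=10.0)
#   pdf.code39('*CODE39*', x=10, y=30, w=1.5, h=5.0)
#   pdf.output('barcodes.pdf', 'F')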
|
mit
| 6,015,571,498,540,385,000
| 36.10625
| 142
| 0.439847
| false
| 3.823769
| false
| false
| false
|
javiercantero/streamlink
|
src/streamlink/plugins/live_russia_tv.py
|
1
|
1078
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.stream import HLSStream
class LiveRussia(Plugin):
    url_re = re.compile(r"https?://(?:www\.)?live\.russia\.tv/index/index/channel_id/")
iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""")
stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""")
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
res = http.get(self.url)
iframe_result = re.search(self.iframe_re, res.text)
if not iframe_result:
self.logger.error("The requested content is unavailable.")
return
res = http.get(iframe_result.group(1))
stream_url_result = re.search(self.stream_re, res.text)
if not stream_url_result:
self.logger.error("The requested content is unavailable.")
return
return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))
__plugin__ = LiveRussia
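# Usage sketch (illustrative): Streamlink picks this plugin up automatically once
# it is on the plugin path, so a matching URL can be played with e.g.
#   streamlink "https://live.russia.tv/index/index/channel_id/1" best
# (the channel_id value is made up; can_handle_url() only checks the URL prefix).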
|
bsd-2-clause
| 8,933,361,089,661,961,000
| 31.69697
| 89
| 0.627087
| false
| 3.411392
| false
| false
| false
|
SVilgelm/CloudFerry
|
cloudferry/lib/utils/qemu_img.py
|
1
|
6446
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
from cloudferry.lib.utils import cmd_cfg
from cloudferry.lib.utils import log
from cloudferry.lib.utils import remote
from cloudferry.lib.utils import remote_runner
from cloudferry.lib.utils import ssh_util
LOG = log.getLogger(__name__)
class QemuImgInfoParser(object):
"""Parses `qemu-img info` command human-readable output.
Tested on qemu-img v1.0 and v2.0.0.
More recent versions of qemu-img support JSON output, but many real-world
systems with old openstack releases still come with qemu-img v1.0 which
does not support JSON"""
__metaclass__ = abc.ABCMeta
def __init__(self, img_info_output):
self.info = self.parse(img_info_output)
@abc.abstractmethod
def parse(self, img_info_output):
pass
@property
def backing_filename(self):
return self.info.get('backing-filename')
@property
def format(self):
return self.info.get('format')
class TextQemuImgInfoParser(QemuImgInfoParser):
def parse(self, img_info_output):
"""Returns dictionary based on human-readable output from
`qemu-img info`
Known problem: breaks if path contains opening parenthesis `(` or
colon `:`"""
result = {}
for l in img_info_output.split('\n'):
if not l.strip():
continue
try:
name, value = l.split(':', 1)
except ValueError:
continue
name = name.strip()
if name == 'backing file':
file_end = value.find('(')
if file_end == -1:
file_end = len(value)
result['backing-filename'] = value[:file_end].strip()
elif name == 'file format':
result['format'] = value.strip()
return result
class JsonQemuImgInfoParser(QemuImgInfoParser):
def parse(self, img_info_output):
return json.loads(img_info_output)
class QemuImg(ssh_util.SshUtil):
commit_cmd = cmd_cfg.qemu_img_cmd("commit %s")
commit_cd_cmd = cmd_cfg.cd_cmd & commit_cmd
convert_cmd = cmd_cfg.qemu_img_cmd("convert %s")
convert_full_image_cmd = cmd_cfg.cd_cmd & convert_cmd("-f %s -O %s %s %s")
rebase_cmd = cmd_cfg.qemu_img_cmd("rebase -u -b %s %s")
convert_cmd = convert_cmd("-O %s %s %s")
def diff_commit(self, dest_path, filename="disk", host_compute=None):
cmd = self.commit_cd_cmd(dest_path, filename)
return self.execute(cmd, host_compute)
def convert_image(self,
disk_format,
path_to_image,
output_format="raw",
baseimage="baseimage",
baseimage_tmp="baseimage.tmp",
host_compute=None):
cmd1 = self.convert_full_image_cmd(path_to_image,
disk_format,
output_format,
baseimage,
baseimage_tmp)
cmd2 = cmd_cfg.move_cmd(path_to_image,
baseimage_tmp,
baseimage)
return \
self.execute(cmd1, host_compute), self.execute(cmd2, host_compute)
def get_info(self, dest_disk_ephemeral, host_instance):
try:
# try to use JSON first, cause it's more reliable
cmd = "qemu-img info --output=json {ephemeral}".format(
ephemeral=dest_disk_ephemeral)
qemu_img_json = self.execute(cmd=cmd,
host_exec=host_instance,
ignore_errors=False,
sudo=True)
return JsonQemuImgInfoParser(qemu_img_json)
except (remote_runner.RemoteExecutionError, TypeError, ValueError) \
as e:
# old qemu version not supporting JSON, fallback to human-readable
# qemu-img output parser
LOG.debug("Failed to get JSON from 'qemu-img info %s', error: %s",
dest_disk_ephemeral, e)
cmd = "qemu-img info {ephemeral}".format(
ephemeral=dest_disk_ephemeral)
qemu_img_output = self.execute(cmd=cmd,
host_exec=host_instance,
ignore_errors=True,
sudo=True)
return TextQemuImgInfoParser(qemu_img_output)
def detect_backing_file(self, dest_disk_ephemeral, host_instance):
return self.get_info(dest_disk_ephemeral,
host_instance).backing_filename
def diff_rebase(self, baseimage, disk, host_compute=None):
LOG.debug("rebase diff: baseimage=%s, disk=%s, host_compute=%s",
baseimage, disk, host_compute)
cmd = self.rebase_cmd(baseimage, disk)
return self.execute(cmd, host_compute, sudo=True)
# example source_path = rbd:compute/QWEQWE-QWE231-QWEWQ
def convert(self, format_to, source_path, dest_path, host_compute=None):
cmd = self.convert_cmd(format_to, source_path, dest_path)
return self.execute(cmd, host_compute)
def get_disk_info(remote_executor, path):
try:
# try to use JSON first, cause it's more reliable
json_output = remote_executor.sudo(
'qemu-img info --output=json "{path}"', path=path)
return JsonQemuImgInfoParser(json_output)
except remote.RemoteFailure:
# old qemu version not supporting JSON, fallback to human-readable
# qemu-img output parser
plain_output = remote_executor.sudo(
'qemu-img info "{path}"', path=path)
return TextQemuImgInfoParser(plain_output)
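# Illustrative sketch (not part of the original module): feeding captured
# `qemu-img info` text through the human-readable parser. The sample output
# below is made up but follows the old text format the parser targets.
if __name__ == '__main__':
    sample = (
        "image: disk.qcow2\n"
        "file format: qcow2\n"
        "virtual size: 10G (10737418240 bytes)\n"
        "backing file: base.raw (actual path: /var/lib/base.raw)\n"
    )
    parsed = TextQemuImgInfoParser(sample)
    print(parsed.format)            # qcow2
    print(parsed.backing_filename)  # base.raw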
|
apache-2.0
| -2,317,633,674,190,102,500
| 37.831325
| 78
| 0.580205
| false
| 4.054088
| false
| false
| false
|
ruleant/buildtime-trend
|
generate_trend.py
|
1
|
2619
|
#!/usr/bin/env python
# vim: set expandtab sw=4 ts=4:
'''
Generates a trend (graph) from the buildtimes in buildtimes.xml
Usage : generate_trend.py -h --mode=native,keen
Copyright (C) 2014 Dieter Adriaenssens <ruleant@users.sourceforge.net>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
from buildtimetrend.tools import get_logger
from buildtimetrend.travis import load_travis_env_vars
from buildtimetrend.settings import Settings
from buildtimetrend.settings import process_argv
def generate_trend(argv):
'''
    Generate trends from analysed buildtime data
'''
settings = Settings()
# load Travis environment variables and save them in settings
load_travis_env_vars()
# process command line arguments
process_argv(argv)
# run trend_keen() always,
# if $KEEN_PROJECT_ID variable is set (checked later), it will be executed
if settings.get_setting("mode_native") is True:
trend_native()
if settings.get_setting("mode_keen") is True:
trend_keen()
def trend_native():
'''
Generate native trend with matplotlib : chart in PNG format
'''
from buildtimetrend.trend import Trend
# use parameter for timestamps file and check if file exists
result_file = os.getenv('BUILD_TREND_OUTPUTFILE', 'trends/buildtimes.xml')
chart_file = os.getenv('BUILD_TREND_TRENDFILE', 'trends/trend.png')
trend = Trend()
if trend.gather_data(result_file):
logger = get_logger()
# log number of builds and list of buildnames
logger.info('Builds (%d) : %s', len(trend.builds), trend.builds)
logger.info('Stages (%d) : %s', len(trend.stages), trend.stages)
trend.generate(chart_file)
def trend_keen():
'''
Setup trends using Keen.io API
'''
from buildtimetrend.keenio import generate_overview_config_file
generate_overview_config_file(Settings().get_project_name())
if __name__ == "__main__":
generate_trend(sys.argv)
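# Illustrative invocation sketch: trend_native() reads its paths from the
# environment, so a typical run looks something like
#   BUILD_TREND_OUTPUTFILE=trends/buildtimes.xml \
#   BUILD_TREND_TRENDFILE=trends/trend.png \
#   ./generate_trend.py --mode=native,keen
# (argument handling is delegated to buildtimetrend.settings.process_argv; the
# --mode form follows the usage line in the module docstring).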
|
gpl-3.0
| -5,318,230,504,942,116,000
| 30.939024
| 78
| 0.71554
| false
| 3.602476
| false
| false
| false
|
Silvian/samaritan
|
samaritan/constants.py
|
1
|
2682
|
"""
@author: Silvian Dragan
@Date: 05/05/2016
@Copyright: Copyright 2016, Samaritan CMA - Published under GNU General Public Licence v3
@Details: https://github.com/Silvian/samaritan
Main file for storing constants classes
"""
from django.conf import settings
from django.utils.timezone import now
class SettingsConstants:
"""Settings constants."""
author = settings.AUTHOR
copyright = settings.COPYRIGHT.format(year=now().year)
licence = settings.LICENCE
version = settings.VERSION
maintainer = settings.MAINTAINER
email = settings.EMAIL
def __init__(self):
return
@classmethod
def get_settings(cls):
return {
'author': cls.author,
'copyright': cls.copyright,
'licence': cls.licence,
'version': cls.version,
'maintainer': cls.maintainer,
'email': cls.email,
}
class WriterConstants:
"""Writer constants."""
TITLE_TEXT = "Report"
FIRST_NAME = "First Name"
LAST_NAME = "Last Name"
DATE_OF_BIRTH = "Date of Birth"
TELEPHONE = "Telephone"
EMAIL = "Email"
ADDRESS_NO = "No."
ADDRESS_STREET = "Street"
ADDRESS_LOCALITY = "Locality"
ADDRESS_CITY = "City"
ADDRESS_POSTCODE = "Postcode"
DETAILS = "Details"
IS_BAPTISED = "Is Baptised"
BAPTISMAL_DATE = "Baptismal Date"
BAPTISMAL_PLACE = "Baptismal Place"
IS_MEMBER = "Is Member"
MEMBERSHIP_TYPE = "Membership Type"
MEMBERSHIP_DATE = "Membership Date"
IS_ACTIVE = "Is Active"
GDPR = "GDPR"
CHURCH_ROLE = "Church Role"
NOTES = "Notes"
YES = "Yes"
NO = "No"
NOT_APPLICABLE = "N/A"
NOT_SPECIFIED = "Not specified"
DATE_FORMAT = "%d-%m-%Y"
FILE_NAME_DATE = "%Y-%m-%d-%H.%M.%S"
def __init__(self):
return
class AuthenticationConstants:
"""Authentication constants."""
LOGOUT_SUCCESS = "You've been logged out successfully"
ACCOUNT_DISABLED = "This account has been disabled"
INVALID_CREDENTIALS = "The username or password is incorrect"
INVALID_CODE = "The code entered is invalid"
LOCKOUT_MESSAGE = (
"Your account has been locked due to repeated failed login attempts! "
"Please contact the system administrator"
)
INCORRECT_PASSWORD = "Your current password is incorrect"
PASSWORD_MISMATCH = "The new password did not match password confirmation"
SAME_PASSWORD = "The new password cannot be the same as existing password"
WEAK_PASSWORD = "The password is too weak and cannot be used"
BREACHED_PASSWORD = "The password has been breached and cannot be used"
def __init__(self):
return
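# Illustrative sketch: views and templates typically consume these via the
# classmethod, e.g.
#   footer = SettingsConstants.get_settings()
#   footer['copyright']  # copyright string with the current year filled in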
|
gpl-3.0
| 2,562,244,859,031,018,000
| 26.367347
| 89
| 0.645787
| false
| 3.547619
| false
| false
| false
|
AlexBaranosky/EmacsV2
|
floobits/floo/common/migrations.py
|
1
|
3449
|
import os
import json
import errno
from collections import defaultdict
try:
from . import shared as G
from . import utils
except (ImportError, ValueError):
import shared as G
import utils
def rename_floobits_dir():
# TODO: one day this can be removed (once all our users have updated)
old_colab_dir = os.path.realpath(os.path.expanduser(os.path.join('~', '.floobits')))
if os.path.isdir(old_colab_dir) and not os.path.exists(G.BASE_DIR):
print('renaming %s to %s' % (old_colab_dir, G.BASE_DIR))
os.rename(old_colab_dir, G.BASE_DIR)
os.symlink(G.BASE_DIR, old_colab_dir)
def get_legacy_projects():
a = ['msgs.floobits.log', 'persistent.json']
owners = os.listdir(G.COLAB_DIR)
floorc_json = defaultdict(defaultdict)
for owner in owners:
if len(owner) > 0 and owner[0] == '.':
continue
if owner in a:
continue
workspaces_path = os.path.join(G.COLAB_DIR, owner)
try:
workspaces = os.listdir(workspaces_path)
except OSError:
continue
for workspace in workspaces:
workspace_path = os.path.join(workspaces_path, workspace)
workspace_path = os.path.realpath(workspace_path)
try:
fd = open(os.path.join(workspace_path, '.floo'), 'rb')
url = json.loads(fd.read())['url']
fd.close()
except Exception:
url = utils.to_workspace_url({
'port': 3448, 'secure': True, 'host': 'floobits.com', 'owner': owner, 'workspace': workspace
})
floorc_json[owner][workspace] = {
'path': workspace_path,
'url': url
}
return floorc_json
def migrate_symlinks():
data = {}
old_path = os.path.join(G.COLAB_DIR, 'persistent.json')
if not os.path.exists(old_path):
return
old_data = utils.get_persistent_data(old_path)
data['workspaces'] = get_legacy_projects()
data['recent_workspaces'] = old_data.get('recent_workspaces')
utils.update_persistent_data(data)
try:
os.unlink(old_path)
os.unlink(os.path.join(G.COLAB_DIR, 'msgs.floobits.log'))
except Exception:
pass
def __load_floorc():
"""try to read settings out of the .floorc file"""
s = {}
try:
fd = open(G.FLOORC_PATH, 'r')
except IOError as e:
if e.errno == errno.ENOENT:
return s
raise
default_settings = fd.read().split('\n')
fd.close()
for setting in default_settings:
# TODO: this is horrible
if len(setting) == 0 or setting[0] == '#':
continue
try:
name, value = setting.split(' ', 1)
        except ValueError:
continue
s[name.upper()] = value
return s
def migrate_floorc():
s = __load_floorc()
default_host = s.get('DEFAULT_HOST', G.DEFAULT_HOST)
floorc_json = {
'auth': {
default_host: {}
}
}
for k, v in s.items():
k = k.lower()
try:
v = int(v)
except Exception:
pass
if k in ['username', 'secret', 'api_key']:
floorc_json['auth'][default_host][k] = v
else:
floorc_json[k] = v
with open(G.FLOORC_JSON_PATH, 'w') as fd:
fd.write(json.dumps(floorc_json, indent=4, sort_keys=True))
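# Illustrative sketch of the legacy ~/.floorc format parsed by __load_floorc()
# (one "NAME value" pair per line, '#' starts a comment; the values below are
# made up):
#   username example_user
#   secret 0123abcd
#   api_key deadbeef
#   default_host floobits.com
# migrate_floorc() folds username/secret/api_key under
# floorc_json['auth'][default_host] and writes the result to G.FLOORC_JSON_PATH.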
|
gpl-3.0
| -8,162,171,141,782,456,000
| 28.228814
| 112
| 0.554364
| false
| 3.537436
| false
| false
| false
|
tangentlabs/django-fancypages
|
fancypages/managers.py
|
1
|
2544
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import django
from django.db import models
from django.utils.translation import get_language
from .queryset import PageQuerySet
class PageManager(models.Manager):
def get_select_related_queryset(self):
"""
Get the base query set that pulls the related ``PageNode`` whenever
the page queryset is used. The reason for this is that the page node
is essential and we don't want to have multiple queries every time.
:rtype: QuerySet
"""
return PageQuerySet(self.model).select_related('node')
def get_queryset(self):
"""
The default queryset ordering the pages by the node paths to make sure
that they are returned in the order they are in the tree.
:rtype: QuerySet
"""
return self.get_select_related_queryset().order_by('node__path')
def get_query_set(self):
"""
        Method for backwards compatibility only. Support for ``get_query_set``
will be dropped in Django 1.8.
"""
return self.get_queryset()
def top_level(self):
"""
Returns only the top level pages based on the depth provided in the
page node.
:rtype: QuerySet
"""
return self.get_queryset().filter(node__depth=1)
def visible(self, **kwargs):
return self.get_select_related_queryset().visible(**kwargs)
def visible_in(self, group):
return self.get_select_related_queryset().visible_in(group=group)
class ContainerManager(models.Manager):
def get_queryset(self):
if django.VERSION[:2] == (1, 5):
return super(ContainerManager, self).get_query_set()
return super(ContainerManager, self).get_queryset()
def get_language_query_set(self, **kwargs):
if 'language_code' not in kwargs:
kwargs['language_code'] = get_language()
return self.get_queryset().filter(**kwargs)
def all(self):
return self.get_language_query_set()
def filter(self, **kwargs):
return self.get_language_query_set(**kwargs)
def create(self, **kwargs):
if 'language_code' not in kwargs:
kwargs['language_code'] = get_language()
return super(ContainerManager, self).create(**kwargs)
def get_or_create(self, **kwargs):
if 'language_code' not in kwargs:
kwargs['language_code'] = get_language()
return self.get_queryset().get_or_create(**kwargs)
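# Illustrative usage sketch (assumes models wired to these managers as in
# fancypages, e.g. Page.objects = PageManager()):
#   Page.objects.top_level()            # depth-1 pages, ordered by node path
#   Page.objects.visible_in(group=grp)  # delegates to PageQuerySet.visible_in
#   Container.objects.all()             # implicitly scoped to get_language()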
|
bsd-3-clause
| -7,800,725,164,194,478,000
| 30.407407
| 78
| 0.632862
| false
| 4.136585
| false
| false
| false
|
alex/readthedocs.org
|
readthedocs/doc_builder/backends/sphinx.py
|
1
|
5736
|
import os
import shutil
from django.template.loader import render_to_string
from django.template import Template, Context
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from doc_builder.base import BaseBuilder, restoring_chdir
from projects.utils import safe_write, run
from core.utils import copy_to_app_servers
RTD_CONF_ADDITIONS = """
{% load projects_tags %}
#Add RTD Template Path.
if 'templates_path' in locals():
templates_path.insert(0, '{{ template_path }}')
else:
templates_path = ['{{ template_path }}', 'templates', '_templates', '.templates']
#Add RTD Static Path. Add to the end because it overwrites previous files.
if 'html_static_path' in locals():
html_static_path.append('{{ static_path }}')
else:
html_static_path = ['_static', '{{ static_path }}']
#Add RTD CSS File only if they aren't overriding it already
using_rtd_theme = False
if 'html_theme' in locals():
if html_theme in ['default']:
if not 'html_style' in locals():
html_style = 'rtd.css'
html_theme = 'default'
html_theme_options = {}
using_rtd_theme = True
else:
html_style = 'rtd.css'
html_theme = 'default'
html_theme_options = {}
using_rtd_theme = True
#Add sponsorship and project information to the template context.
context = {
'using_theme': using_rtd_theme,
'current_version': "{{ current_version.slug }}",
'MEDIA_URL': "{{ settings.MEDIA_URL }}",
'versions': [{% for version in versions|sort_version_aware %}
("{{ version.slug }}", "{{ version.get_absolute_url }}"),{% endfor %}
],
'slug': '{{ project.slug }}',
'name': '{{ project.name }}',
'badge_revsys': {{ project.sponsored }},
'analytics_code': '{{ project.analytics_code }}',
}
if 'html_context' in locals():
html_context.update(context)
else:
html_context = context
"""
TEMPLATE_DIR = '%s/readthedocs/templates/sphinx' % settings.SITE_ROOT
STATIC_DIR = '%s/_static' % TEMPLATE_DIR
class Builder(BaseBuilder):
"""
The parent for most sphinx builders.
Also handles the default sphinx output of html.
"""
def _whitelisted(self):
"""Modify the given ``conf.py`` file from a whitelisted user's project.
"""
project = self.version.project
#Open file for appending.
outfile = open(project.conf_file(self.version.slug), 'a')
outfile.write("\n")
rtd_ctx = Context({
'versions': project.active_versions(),
'current_version': self.version,
'project': project,
'settings': settings,
'static_path': STATIC_DIR,
'template_path': TEMPLATE_DIR,
})
rtd_string = Template(RTD_CONF_ADDITIONS).render(rtd_ctx)
outfile.write(rtd_string)
def _sanitize(self):
project = self.version.project
conf_template = render_to_string('sphinx/conf.py.conf',
{'project': project,
'template_dir': TEMPLATE_DIR,
'badge': project.sponsored
})
rtd_ctx = Context({
'versions': project.active_versions(),
'current_version': self.version,
'project': project,
'settings': settings,
'static_path': STATIC_DIR,
'template_path': TEMPLATE_DIR,
})
rtd_string = Template(RTD_CONF_ADDITIONS).render(rtd_ctx)
conf_template = conf_template + "\n" + rtd_string
safe_write(project.conf_file(self.version.slug), conf_template)
def clean(self):
try:
if self.version.project.whitelisted and self.version.project.is_imported:
print "Project whitelisted"
self._whitelisted()
else:
print "Writing conf to disk"
self._sanitize()
except (OSError, SiteProfileNotAvailable, ObjectDoesNotExist):
try:
print "Writing conf to disk on error."
self._sanitize()
except (OSError, IOError):
print "Conf file not found. Error writing to disk."
return ('', 'Conf file not found. Error writing to disk.', -1)
@restoring_chdir
def build(self):
project = self.version.project
os.chdir(project.conf_dir(self.version.slug))
if project.use_virtualenv and project.whitelisted:
build_command = '%s -b html . _build/html' % project.venv_bin(
version=self.version.slug, bin='sphinx-build')
else:
build_command = "sphinx-build -b html . _build/html"
build_results = run(build_command)
if 'no targets are out of date.' in build_results[1]:
self._changed = False
return build_results
def move(self):
project = self.version.project
if project.full_build_path(self.version.slug):
target = project.rtd_build_path(self.version.slug)
if getattr(settings, "MULTIPLE_APP_SERVERS", None):
print "Copying docs to remote server."
copy_to_app_servers(project.full_build_path(self.version.slug), target)
else:
if os.path.exists(target):
shutil.rmtree(target)
print "Copying docs on the local filesystem"
shutil.copytree(project.full_build_path(self.version.slug), target)
else:
print "Not moving docs, because the build dir is unknown."
|
mit
| -7,620,218,241,863,675,000
| 36.246753
| 87
| 0.588389
| false
| 4.12069
| false
| false
| false
|
lukeroge/CloudbotX
|
stratus/loader/pluginloader.py
|
1
|
16561
|
import asyncio
import enum
import glob
import importlib
import inspect
import logging
import os
import re
import itertools
from stratus.event import Event, HookEvent
logger = logging.getLogger("stratus")
class HookType(enum.Enum):
"""
"""
on_start = 1,
on_stop = 2,
sieve = 3,
event = 4,
regex = 5,
command = 6,
irc_raw = 7,
def find_plugins(plugin_directories):
"""
Args:
plugin_directories: A list of
Returns:
"""
for directory_pattern in plugin_directories:
for directory in glob.iglob(directory_pattern):
logger.info("Loading plugins from {}".format(directory))
if not os.path.exists(os.path.join(directory, "__init__.py")):
with open(os.path.join(directory, "__init__.py"), 'w') as file:
file.write('\n') # create blank __init__.py file if none exists
for plugin in glob.iglob(os.path.join(directory, '*.py')):
yield plugin
def find_hooks(title, module):
"""
:type title: str
:type module: object
    :rtype: dict[HookType, list[Hook]]
"""
# set the loaded flag
module._plugins_loaded = True
hooks_dict = dict()
for hook_type in HookType:
hooks_dict[hook_type] = list()
for name, func in module.__dict__.items():
if hasattr(func, "bot_hooks"):
# if it has stratus hook
for hook in func.bot_hooks:
hook_type = hook.type
hook_class = _hook_classes[hook_type]
hooks_dict[hook_type].append(hook_class(title, hook))
# delete the hook to free memory
del func.bot_hooks
return hooks_dict
def _prepare_parameters(hook, base_event, hook_event):
"""
Prepares arguments for the given hook
:type hook: stratus.loader.Hook
:type base_event: stratus.event.Event
:type hook_event: stratus.event.HookEvent
:rtype: list
"""
parameters = []
for required_arg in hook.required_args:
if hasattr(base_event, required_arg):
value = getattr(base_event, required_arg)
parameters.append(value)
elif hasattr(hook_event, required_arg):
value = getattr(hook_event, required_arg)
parameters.append(value)
else:
logger.warning("Plugin {} asked for invalid argument '{}', cancelling execution!"
.format(hook.description, required_arg))
logger.debug("Valid arguments are: {}".format(dir(base_event) + dir(hook_event)))
return None
return parameters
class Loader:
"""
Loader is the core of Stratus plugin loading.
Loader loads Plugins, and adds their Hooks to easy-access dicts/lists.
Each Plugin represents a file, and loads hooks onto itself using find_hooks.
Plugins are the lowest level of abstraction in this class. There are four different plugin types:
- CommandPlugin is for bot commands
- RawPlugin hooks onto irc_raw irc lines
- RegexPlugin loads a regex parameter, and executes on irc lines which match the regex
- SievePlugin is a catch-all sieve, which all other plugins go through before being executed.
:type bot: stratus.engine.Stratus
:type commands: dict[str, CommandHook]
:type raw_triggers: dict[str, list[RawHook]]
:type catch_all_triggers: list[RawHook]
:type event_type_hooks: dict[stratus.event.EventType, list[EventHook]]
:type regex_hooks: list[(re.__Regex, RegexHook)]
:type sieves: list[SieveHook]
"""
def __init__(self, bot):
"""
Creates a new Loader. You generally only need to do this from inside stratus.bot.Stratus
:type bot: stratus.engine.Stratus
"""
self.bot = bot
self.commands = {}
self.raw_triggers = {}
self.catch_all_triggers = []
self.event_type_hooks = {}
self.regex_hooks = []
self.sieves = []
self.shutdown_hooks = []
self._hook_locks = {}
async def load_all(self, plugin_directories):
"""
Load a plugin from each *.py file in the given directory.
:type plugin_directories: collections.Iterable[str]
"""
path_list = find_plugins(plugin_directories)
# Load plugins asynchronously :O
await asyncio.gather(*(self.load_plugin(path) for path in path_list), loop=self.bot.loop)
async def load_plugin(self, path):
"""
Loads a plugin from the given path and plugin object, then registers all hooks from that plugin.
:type path: str
"""
file_path = os.path.abspath(path)
relative_path = os.path.relpath(file_path, os.path.curdir)
module_name = os.path.splitext(relative_path)[0].replace(os.path.sep, '.')
if os.path.altsep:
module_name = module_name.replace(os.path.altsep, '.')
title = module_name
if module_name.startswith('plugins.'): # if it is in the default plugin dir, don't prepend plugins. to title
title = title[len('plugins.'):]
try:
plugin_module = importlib.import_module(module_name)
except Exception:
logger.exception("Error loading {}:".format(file_path))
return
hooks = find_hooks(title, plugin_module)
# proceed to register hooks
# run on_start hooks
on_start_event = Event(bot=self.bot)
for on_start_hook in hooks[HookType.on_start]:
success = await self.launch(on_start_hook, on_start_event)
if not success:
logger.warning("Not registering hooks from plugin {}: on_start hook errored".format(title))
return
# register events
for event_hook in hooks[HookType.event]:
for event_type in event_hook.types:
if event_type in self.event_type_hooks:
self.event_type_hooks[event_type].append(event_hook)
else:
self.event_type_hooks[event_type] = [event_hook]
self._log_hook(event_hook)
# register commands
for command_hook in hooks[HookType.command]:
for alias in command_hook.aliases:
if alias in self.commands:
logger.warning(
"Plugin {} attempted to register command {} which was already registered by {}. "
"Ignoring new assignment.".format(title, alias, self.commands[alias].plugin))
else:
self.commands[alias] = command_hook
self._log_hook(command_hook)
# register raw hooks
for raw_hook in hooks[HookType.irc_raw]:
if raw_hook.is_catch_all():
self.catch_all_triggers.append(raw_hook)
else:
for trigger in raw_hook.triggers:
if trigger in self.raw_triggers:
self.raw_triggers[trigger].append(raw_hook)
else:
self.raw_triggers[trigger] = [raw_hook]
self._log_hook(raw_hook)
# register regex hooks
for regex_hook in hooks[HookType.regex]:
for regex in regex_hook.triggers:
self.regex_hooks.append((regex, regex_hook))
self._log_hook(regex_hook)
# register sieves
for sieve_hook in hooks[HookType.sieve]:
self.sieves.append(sieve_hook)
self._log_hook(sieve_hook)
# register shutdown hooks
for stop_hook in hooks[HookType.on_stop]:
self.shutdown_hooks.append(stop_hook)
self._log_hook(stop_hook)
def _log_hook(self, hook):
"""
Logs registering a given hook
:type hook: Hook
"""
if self.bot.config.get("logging", {}).get("show_plugin_loading", True):
logger.debug("Loaded {}".format(repr(hook)))
async def _execute_hook(self, hook, base_event, hook_event):
"""
Runs the specific hook with the given bot and event.
Returns False if the hook errored, True otherwise.
:type hook: stratus.loader.Hook
:type base_event: stratus.event.Event
:type hook_event: stratus.event.HookEvent
:rtype: bool
"""
parameters = _prepare_parameters(hook, base_event, hook_event)
if parameters is None:
return False
try:
# _internal_run_threaded and _internal_run_coroutine prepare the database, and run the hook.
# _internal_run_* will prepare parameters and the database session, but won't do any error catching.
if hook.threaded:
out = await self.bot.loop.run_in_executor(None, hook.function, *parameters)
else:
out = await hook.function(*parameters)
except Exception:
logger.exception("Error in hook {}".format(hook.description))
base_event.message("Error in plugin '{}'.".format(hook.plugin))
return False
if out is not None:
if isinstance(out, (list, tuple)):
# if there are multiple items in the response, return them on multiple lines
base_event.reply(*out)
else:
base_event.reply(*str(out).split('\n'))
return True
async def _sieve(self, sieve, event, hook_event):
"""
:type sieve: stratus.loader.Hook
:type event: stratus.event.Event
:type hook_event: stratus.event.HookEvent
:rtype: stratus.event.Event
"""
try:
if sieve.threaded:
result = await self.bot.loop.run_in_executor(None, sieve.function, event, hook_event)
else:
result = await sieve.function(event, hook_event)
except Exception:
logger.exception("Error running sieve {} on {}:".format(sieve.description, hook_event.hook.description))
return None
else:
return result
async def launch(self, hook, base_event, hevent=None):
"""
Dispatch a given event to a given hook using a given bot object.
Returns False if the hook didn't run successfully, and True if it ran successfully.
:type base_event: stratus.event.Event
:type hevent: stratus.event.HookEvent | stratus.event.CommandHookEvent
:type hook: stratus.loader.Hook | stratus.loader.CommandHook
:rtype: bool
"""
if hevent is None:
hevent = HookEvent(base_event=base_event, hook=hook)
if hook.type not in (HookType.on_start, HookType.on_stop): # we don't need sieves on on_start or on_stop hooks.
for sieve in self.bot.loader.sieves:
base_event = await self._sieve(sieve, base_event, hevent)
if base_event is None:
return False
if hook.type is HookType.command and hook.auto_help and not hevent.text and hook.doc is not None:
hevent.notice_doc()
return False
if hook.single_thread:
# There should only be once instance of this hook running at a time, so let's use a lock for it.
key = (hook.plugin, hook.function_name)
if key not in self._hook_locks:
self._hook_locks[key] = asyncio.Lock(loop=self.bot.loop)
# Run the plugin with the message, and wait for it to finish
with (await self._hook_locks[key]):
result = await self._execute_hook(hook, base_event, hevent)
else:
# Run the plugin with the message, and wait for it to finish
result = await self._execute_hook(hook, base_event, hevent)
# Return the result
return result
async def run_shutdown_hooks(self):
shutdown_event = Event(bot=self.bot)
tasks = (self.launch(hook, shutdown_event) for hook in self.shutdown_hooks)
await asyncio.gather(*tasks, loop=self.bot.loop)
class Hook:
"""
Each hook is specific to one function. This class is never used by itself, rather extended.
:type type: HookType
:type plugin: str
:type function: callable
:type function_name: str
:type required_args: list[str]
:type threaded: bool
:type run_first: bool
:type permissions: list[str]
:type single_thread: bool
"""
type = None # to be assigned in subclasses
def __init__(self, plugin, hook_decorator):
"""
:type plugin: str
"""
self.plugin = plugin
self.function = hook_decorator.function
self.function_name = self.function.__name__
self.required_args = inspect.getargspec(self.function)[0]
if self.required_args is None:
self.required_args = []
if asyncio.iscoroutine(self.function) or asyncio.iscoroutinefunction(self.function):
self.threaded = False
else:
self.threaded = True
self.permissions = hook_decorator.kwargs.pop("permissions", [])
self.single_thread = hook_decorator.kwargs.pop("single_instance", False)
self.run_first = hook_decorator.kwargs.pop("run_first", False)
if hook_decorator.kwargs:
# we should have popped all the args, so warn if there are any left
logger.warning("Ignoring extra args {} from {}".format(hook_decorator.kwargs, self.description))
@property
def description(self):
return "{}:{}".format(self.plugin, self.function_name)
def __repr__(self, **kwargs):
result = "type: {}, plugin: {}, permissions: {}, run_first: {}, single_instance: {}, threaded: {}".format(
self.type.name, self.plugin, self.permissions, self.run_first, self.single_thread, self.threaded)
if kwargs:
result = ", ".join(itertools.chain(("{}: {}".format(*item) for item in kwargs.items()), (result,)))
return "{}[{}]".format(type(self).__name__, result)
class OnStartHook(Hook):
type = HookType.on_start
class OnStopHook(Hook):
type = HookType.on_stop
class SieveHook(Hook):
type = HookType.sieve
class EventHook(Hook):
"""
:type types: set[stratus.event.EventType]
"""
type = HookType.event
def __init__(self, plugin, decorator):
"""
:type plugin: Plugin
:type decorator: stratus.hook.EventDecorator
"""
self.types = decorator.triggers
super().__init__(plugin, decorator)
class RegexHook(Hook):
"""
:type triggers: set[re.__Regex]
"""
type = HookType.regex
def __init__(self, plugin, decorator):
"""
:type plugin: Plugin
:type decorator: stratus.hook.RegexDecorator
"""
self.triggers = decorator.triggers
super().__init__(plugin, decorator)
def __repr__(self):
return super().__repr__(triggers=", ".join(regex.pattern for regex in self.triggers))
class CommandHook(Hook):
"""
:type name: str
:type aliases: list[str]
:type doc: str
:type auto_help: bool
"""
type = HookType.command
def __init__(self, plugin, decorator):
"""
:type plugin: str
:type decorator: stratus.hook.CommandDecorator
"""
self.auto_help = decorator.kwargs.pop("autohelp", True)
self.name = decorator.main_alias
self.aliases = list(decorator.triggers) # turn the set into a list
self.aliases.remove(self.name)
self.aliases.insert(0, self.name) # make sure the name, or 'main alias' is in position 0
self.doc = decorator.doc
super().__init__(plugin, decorator)
def __repr__(self):
return super().__repr__(name=self.name, aliases=self.aliases[1:])
class RawHook(Hook):
"""
:type triggers: set[str]
"""
type = HookType.irc_raw
def __init__(self, plugin, decorator):
"""
:type plugin: Plugin
:type decorator: stratus.hook.IrcRawDecorator
"""
self.triggers = decorator.triggers
super().__init__(plugin, decorator)
def is_catch_all(self):
return "*" in self.triggers
def __repr__(self):
return super().__repr__(triggers=self.triggers)
_hook_classes = {
HookType.on_start: OnStartHook,
HookType.on_stop: OnStopHook,
HookType.sieve: SieveHook,
HookType.event: EventHook,
HookType.regex: RegexHook,
HookType.command: CommandHook,
HookType.irc_raw: RawHook,
}
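# Illustrative plugin sketch (the decorator spelling below is an assumption; the
# real decorators live in stratus.hook and attach a `bot_hooks` list to the
# function, which find_hooks() above inspects):
#   @hook.command("ping")
#   async def ping(reply):
#       reply("pong")
# On load, Loader.load_plugin() registers such a function as a CommandHook under
# HookType.command and dispatches it through launch() when the command fires.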
|
gpl-3.0
| 3,569,536,791,953,528,300
| 32.122
| 120
| 0.596522
| false
| 4.048154
| false
| false
| false
|
ddurieux/alignak
|
alignak/property.py
|
1
|
11612
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python ; coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, h.goebel@goebel-consult.de
# Guillaume Bour, guillaume@bour.cc
# Frédéric Vachon, fredvac@gmail.com
# aviau, alexandre.viau@savoirfairelinux.com
# Nicolas Dupeux, nicolas@dupeux.net
# Grégory Starck, g.starck@gmail.com
# Gerhard Lausser, gerhard.lausser@consol.de
# Sebastien Coavoux, s.coavoux@free.fr
# Christophe Simon, geektophe@gmail.com
# Jean Gabes, naparuba@gmail.com
# Romain Forlot, rforlot@yahoo.com
# Christophe SIMON, christophe.simon@dailymotion.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import re
from alignak.util import to_float, to_split, to_char, to_int, unique_value, list_split
import logging
__all__ = ['UnusedProp', 'BoolProp', 'IntegerProp', 'FloatProp',
'CharProp', 'StringProp', 'ListProp',
'FULL_STATUS', 'CHECK_RESULT']
# Suggestion
# Is this useful? see above
__author__ = "Hartmut Goebel <h.goebel@goebel-consult.de>"
__copyright__ = "Copyright 2010-2011 by Hartmut Goebel <h.goebel@goebel-consult.de>"
__licence__ = "GNU Affero General Public License version 3 (AGPL v3)"
FULL_STATUS = 'full_status'
CHECK_RESULT = 'check_result'
none_object = object()
class Property(object):
"""Baseclass of all properties.
Same semantic for all subclasses (except UnusedProp): The property
is required if, and only if, the default value is `None`.
"""
def __init__(self, default=none_object, class_inherit=None,
unmanaged=False, help='', no_slots=False,
fill_brok=None, conf_send_preparation=None,
brok_transformation=None, retention=False,
retention_preparation=None, to_send=False,
override=False, managed=True, split_on_coma=True, merging='uniq'):
"""
`default`: default value to be used if this property is not set.
                   If no default is given, this property is required.
`class_inherit`: List of 2-tuples, (Service, 'blabla'): must
set this property to the Service class with name
blabla. if (Service, None): must set this property
to the Service class with same name
`unmanaged`: ....
`help`: usage text
`no_slots`: do not take this property for __slots__
`fill_brok`: if set, send to broker. There are two categories:
FULL_STATUS for initial and update status,
CHECK_RESULT for check results
`retention`: if set, we will save this property in the retention files
        `retention_preparation`: function; if set, the value will be passed
                                 through this function before being saved
                                 to the retention data
        `split_on_coma`: whether a list property value should be split on the
                         comma delimiter; set it to False when the values
                         contain commas that we want to keep.
Only for the initial call:
conf_send_preparation: if set, will pass the property to this
function. It's used to 'flatten' some dangerous
properties like realms that are too 'linked' to
                               be sent like that.
brok_transformation: if set, will call the function with the
value of the property when flattening
data is necessary (like realm_name instead of
the realm object).
        override: for the scheduler: if set, the property must override the
                  value of the configuration we send to it
managed: property that is managed in Nagios but not in Alignak
        merging: for merging properties: whether we keep only one value
                 ('uniq') or join the values with commas
"""
self.default = default
self.has_default = (default is not none_object)
self.required = not self.has_default
self.class_inherit = class_inherit or []
self.help = help or ''
self.unmanaged = unmanaged
self.no_slots = no_slots
self.fill_brok = fill_brok or []
self.conf_send_preparation = conf_send_preparation
self.brok_transformation = brok_transformation
self.retention = retention
self.retention_preparation = retention_preparation
self.to_send = to_send
self.override = override
self.managed = managed
self.unused = False
self.merging = merging
self.split_on_coma = split_on_coma
def pythonize(self, val):
return val
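# Example (illustrative sketch of the default/required semantics above):
#
#   p1 = Property()                # no default given -> p1.required is True
#   p2 = Property(default='foo')   # default given    -> p2.required is False
#   p2.pythonize('bar')            # the base class returns the value unchanged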
class UnusedProp(Property):
"""A unused Property. These are typically used by Nagios but
no longer useful/used by Alignak.
This is just to warn the user that the option he uses is no more used
in Alignak.
"""
# Since this property is not used, there is no use for other
# parameters than 'text'.
    # 'text': optional usage text; if present, it will be printed to explain
    # why the parameter is no longer useful.
def __init__(self, text=None):
if text is None:
text = ("This parameter is no longer useful in the "
"Alignak architecture.")
self.text = text
self.has_default = False
self.class_inherit = []
self.unused = True
self.managed = True
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
class BoolProp(Property):
"""A Boolean Property.
    Boolean values are case-insensitively defined as 0, false, no, off
    (for False) and 1, true, yes, on (for True).
"""
@staticmethod
def pythonize(val):
if isinstance(val, bool):
return val
val = unique_value(val).lower()
        if val in _boolean_states:
return _boolean_states[val]
else:
raise PythonizeError("Cannot convert '%s' to a boolean value" % val)
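# Example (illustrative sketch; assumes unique_value() passes a plain string through):
#
#   BoolProp.pythonize(True)      # -> True (already a bool)
#   BoolProp.pythonize('Yes')     # -> True  ('yes' is in _boolean_states)
#   BoolProp.pythonize('off')     # -> False
#   BoolProp.pythonize('maybe')   # raises PythonizeError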
class IntegerProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_int(val)
class FloatProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_float(val)
class CharProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return to_char(val)
class StringProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
val = unique_value(val)
return val
class PathProp(StringProp):
""" A string property representing a "running" (== VAR) file path """
class ConfigPathProp(StringProp):
""" A string property representing a config file path """
class ListProp(Property):
"""Please Add a Docstring to describe the class here"""
def pythonize(self, val):
if isinstance(val, list):
return [s.strip() for s in list_split(val, self.split_on_coma)]
else:
return [s.strip() for s in to_split(val, self.split_on_coma)]
class LogLevelProp(StringProp):
""" A string property representing a logging level """
def pythonize(self, val):
val = unique_value(val)
return logging.getLevelName(val)
class DictProp(Property):
def __init__(self, elts_prop=None, *args, **kwargs):
"""Dictionary of values.
If elts_prop is not None, must be a Property subclass
All dict values will be casted as elts_prop values when pythonized
elts_prop = Property of dict members
"""
super(DictProp, self).__init__(*args, **kwargs)
if elts_prop is not None and not issubclass(elts_prop, Property):
raise TypeError("DictProp constructor only accept Property"
"sub-classes as elts_prop parameter")
if elts_prop is not None:
self.elts_prop = elts_prop()
def pythonize(self, val):
val = unique_value(val)
def split(kv):
m = re.match("^\s*([^\s]+)\s*=\s*([^\s]+)\s*$", kv)
if m is None:
raise ValueError
return (
m.group(1),
# >2.4 only. we keep it for later. m.group(2) if self.elts_prop is None
# else self.elts_prop.pythonize(m.group(2))
(self.elts_prop.pythonize(m.group(2)), m.group(2))[self.elts_prop is None]
)
if val is None:
return(dict())
if self.elts_prop is None:
return val
# val is in the form "key1=addr:[port],key2=addr:[port],..."
print ">>>", dict([split(kv) for kv in to_split(val)])
return dict([split(kv) for kv in to_split(val)])
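# Example (illustrative sketch; assumes to_split() yields the comma-separated
# items and unique_value() passes a plain string through):
#
#   prop = DictProp(elts_prop=IntegerProp)
#   prop.pythonize('retention=60,check_interval=5')
#   # -> {'retention': 60, 'check_interval': 5}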
class AddrProp(Property):
"""Address property (host + port)"""
def pythonize(self, val):
"""
        e.g. val = "192.168.10.24:445"
NOTE: port is optional
"""
val = unique_value(val)
m = re.match("^([^:]*)(?::(\d+))?$", val)
if m is None:
raise ValueError
addr = {'address': m.group(1)}
if m.group(2) is not None:
addr['port'] = int(m.group(2))
return addr
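# Example (illustrative sketch; assumes unique_value() passes a plain string through):
#
#   AddrProp().pythonize('192.168.10.24:445')  # -> {'address': '192.168.10.24', 'port': 445}
#   AddrProp().pythonize('192.168.10.24')      # -> {'address': '192.168.10.24'}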
class ToGuessProp(Property):
"""Unknown property encountered while parsing"""
@staticmethod
def pythonize(val):
if isinstance(val, list) and len(set(val)) == 1:
# If we have a list with a unique value just use it
return val[0]
else:
            # With several different values we can't choose which one to drop.
return val
class IntListProp(ListProp):
"""Integer List property"""
def pythonize(self, val):
val = super(IntListProp, self).pythonize(val)
try:
return [int(e) for e in val]
except ValueError as value_except:
raise PythonizeError(str(value_except))
class PythonizeError(Exception):
pass
|
agpl-3.0
| 6,923,826,551,157,229,000
| 32.359195
| 90
| 0.617797
| false
| 3.840225
| false
| false
| false
|
mola/qgis
|
python/plugins/GdalTools/tools/doTranslate.py
|
1
|
12011
|
# -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_widgetTranslate import Ui_GdalToolsWidget as Ui_Widget
from widgetBatchBase import GdalToolsBaseBatchWidget as BaseBatchWidget
from dialogSRS import GdalToolsSRSDialog as SRSDialog
import GdalTools_utils as Utils
class GdalToolsDialog(QWidget, Ui_Widget, BaseBatchWidget):
def __init__(self, iface):
QWidget.__init__(self)
self.iface = iface
self.canvas = self.iface.mapCanvas()
self.setupUi(self)
BaseBatchWidget.__init__(self, self.iface, "gdal_translate")
# set the default QSpinBoxes and QProgressBar value
self.outsizeSpin.setValue(25)
self.progressBar.setValue(0)
self.progressBar.hide()
self.formatLabel.hide()
self.formatCombo.hide()
if Utils.Version( Utils.GdalConfig.version() ) < "1.7":
index = self.expandCombo.findText('gray', Qt.MatchFixedString)
if index >= 0:
self.expandCombo.removeItem(index)
self.outputFormat = Utils.fillRasterOutputFormat()
self.setParamsStatus(
[
(self.inputLayerCombo, [SIGNAL("currentIndexChanged(int)"), SIGNAL("editTextChanged(const QString &)")] ),
(self.outputFileEdit, SIGNAL("textChanged(const QString &)")),
(self.targetSRSEdit, SIGNAL("textChanged(const QString &)"), self.targetSRSCheck),
(self.selectTargetSRSButton, None, self.targetSRSCheck),
(self.creationOptionsTable, [SIGNAL("cellValueChanged(int, int)"), SIGNAL("rowRemoved()")], self.creationGroupBox),
(self.outsizeSpin, SIGNAL("valueChanged(const QString &)"), self.outsizeCheck),
(self.nodataSpin, SIGNAL("valueChanged(int)"), self.nodataCheck),
(self.expandCombo, SIGNAL("currentIndexChanged(int)"), self.expandCheck, "1.6.0"),
(self.sdsCheck, SIGNAL("stateChanged(int)")),
(self.srcwinEdit, SIGNAL("textChanged(const QString &)"), self.srcwinCheck),
(self.prjwinEdit, SIGNAL("textChanged(const QString &)"), self.prjwinCheck)
]
)
#self.connect(self.canvas, SIGNAL("layersChanged()"), self.fillInputLayerCombo)
self.connect(self.inputLayerCombo, SIGNAL("currentIndexChanged(int)"), self.fillTargetSRSEditDefault)
self.connect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputFile )
self.connect(self.selectOutputFileButton, SIGNAL("clicked()"), self.fillOutputFileEdit)
self.connect(self.selectTargetSRSButton, SIGNAL("clicked()"), self.fillTargetSRSEdit)
self.connect( self.batchCheck, SIGNAL( "stateChanged( int )" ), self.switchToolMode )
# add raster filters to combo
self.formatCombo.addItems( Utils.FileFilter.allRastersFilter().split( ";;" ) )
# add layers to combo
self.fillInputLayerCombo()
def switchToolMode( self ):
self.setCommandViewerEnabled( not self.batchCheck.isChecked() )
self.inputLayerCombo.clear()
self.inputLayerCombo.clearEditText()
self.inputLayerCombo.setCurrentIndex(-1)
self.outputFileEdit.clear()
if self.batchCheck.isChecked():
self.inFileLabel = self.label_3.text()
self.outFileLabel = self.label_2.text()
self.label_3.setText( QCoreApplication.translate( "GdalTools", "&Input directory" ) )
self.label_2.setText( QCoreApplication.translate( "GdalTools", "&Output directory" ) )
self.progressBar.show()
self.formatLabel.show()
self.formatCombo.show()
QObject.disconnect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputFile )
QObject.disconnect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputFileEdit )
      QObject.connect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputDir )
QObject.connect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputDir )
else:
self.label_3.setText( self.inFileLabel )
self.label_2.setText( self.outFileLabel )
self.base.textEditCommand.setEnabled( True )
self.fillInputLayerCombo()
self.progressBar.hide()
self.formatLabel.hide()
self.formatCombo.hide()
QObject.disconnect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputDir )
QObject.disconnect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputDir )
QObject.connect( self.selectInputFileButton, SIGNAL( "clicked()" ), self.fillInputFile )
QObject.connect( self.selectOutputFileButton, SIGNAL( "clicked()" ), self.fillOutputFileEdit )
def fillInputLayerCombo(self):
self.inputLayerCombo.clear()
( self.layers, names ) = Utils.getRasterLayers()
self.inputLayerCombo.addItems( names )
def fillInputFile( self ):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
inputFile = Utils.FileDialog.getOpenFileName( self, self.tr( "Select the input file for Translate" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
if inputFile.isEmpty():
return
Utils.FileFilter.setLastUsedRasterFilter( lastUsedFilter )
self.inputLayerCombo.setCurrentIndex(-1)
self.inputLayerCombo.setEditText( inputFile )
# get SRS for target file if necessary and possible
self.refreshTargetSRS()
def fillInputDir( self ):
inputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the input directory with files to Translate" ))
if inputDir.isEmpty():
return
self.inputLayerCombo.setCurrentIndex(-1)
self.inputLayerCombo.setEditText( inputDir )
filter = Utils.getRasterExtensions()
workDir = QDir( inputDir )
workDir.setFilter( QDir.Files | QDir.NoSymLinks | QDir.NoDotAndDotDot )
workDir.setNameFilters( filter )
# search for a valid SRS, then use it as default target SRS
srs = QString()
for fname in workDir.entryList():
fl = inputDir + "/" + fname
srs = Utils.getRasterSRS( self, fl )
if not srs.isEmpty():
break
self.targetSRSEdit.setText( srs )
def fillOutputFileEdit(self):
lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
outputFile = Utils.FileDialog.getSaveFileName(self, self.tr( "Select the raster file to save the results to" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
if outputFile.isEmpty():
return
Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
self.outputFormat = Utils.fillRasterOutputFormat(lastUsedFilter, outputFile)
self.outputFileEdit.setText(outputFile)
def fillOutputDir( self ):
outputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the output directory to save the results to" ) )
if outputDir.isEmpty():
return
self.outputFileEdit.setText( outputDir )
def fillTargetSRSEditDefault(self, index):
if index < 0:
return
self.refreshTargetSRS()
def refreshTargetSRS(self):
self.targetSRSEdit.setText( Utils.getRasterSRS( self, self.getInputFileName() ) )
def fillTargetSRSEdit(self):
dialog = SRSDialog( "Select the target SRS" )
if dialog.exec_():
self.targetSRSEdit.setText(dialog.getProjection())
def getArguments(self):
arguments = QStringList()
if self.targetSRSCheck.isChecked() and not self.targetSRSEdit.text().isEmpty():
arguments << "-a_srs"
arguments << self.targetSRSEdit.text()
if self.creationGroupBox.isChecked():
for opt in self.creationOptionsTable.options():
arguments << "-co"
arguments << opt
if self.outsizeCheck.isChecked() and self.outsizeSpin.value() != 100:
arguments << "-outsize"
arguments << self.outsizeSpin.text()
arguments << self.outsizeSpin.text()
if self.expandCheck.isChecked():
arguments << "-expand"
arguments << self.expandCombo.currentText().toLower()
if self.nodataCheck.isChecked():
arguments << "-a_nodata"
arguments << str(self.nodataSpin.value())
if self.sdsCheck.isChecked():
arguments << "-sds"
if self.srcwinCheck.isChecked() and not self.srcwinEdit.text().isEmpty():
#coordList = []
coordList = self.srcwinEdit.text().split( ' ', QString.SkipEmptyParts )
if len(coordList) == 4 and not coordList[3].isEmpty():
try:
for x in coordList:
test = int(x)
except ValueError:
#print "Coordinates must be integer numbers."
QMessageBox.critical(self, self.tr("Translate - srcwin"), self.tr("Image coordinates (pixels) must be integer numbers."))
else:
arguments << "-srcwin"
for x in coordList:
arguments << x
if self.prjwinCheck.isChecked() and not self.prjwinEdit.text().isEmpty():
#coordList = []
coordList = self.prjwinEdit.text().split( ' ', QString.SkipEmptyParts )
if len(coordList) == 4 and not coordList[3].isEmpty():
try:
for x in coordList:
test = float(x)
except ValueError:
#print "Coordinates must be integer numbers."
QMessageBox.critical(self, self.tr("Translate - prjwin"), self.tr("Image coordinates (geographic) must be numbers."))
else:
arguments << "-projwin"
for x in coordList:
arguments << x
if self.isBatchEnabled():
if self.formatCombo.currentIndex() != 0:
arguments << "-of"
arguments << Utils.fillRasterOutputFormat( self.formatCombo.currentText() )
return arguments
else:
return arguments
if not self.outputFileEdit.text().isEmpty():
arguments << "-of"
arguments << self.outputFormat
arguments << self.getInputFileName()
arguments << self.getOutputFileName()
return arguments
def getInputFileName(self):
if self.inputLayerCombo.currentIndex() >= 0:
return self.layers[self.inputLayerCombo.currentIndex()].source()
return self.inputLayerCombo.currentText()
def getOutputFileName(self):
return self.outputFileEdit.text()
def addLayerIntoCanvas(self, fileInfo):
self.iface.addRasterLayer(fileInfo.filePath())
def isBatchEnabled(self):
return self.batchCheck.isChecked()
def setProgressRange(self, maximum):
self.progressBar.setRange(0, maximum)
def updateProgress(self, index, total):
if index < total:
self.progressBar.setValue( index + 1 )
else:
self.progressBar.setValue( 0 )
def batchRun(self):
exts = self.formatCombo.currentText().remove( QRegExp('^.*\(') ).remove( QRegExp('\).*$') ).split( " " )
if not exts.isEmpty() and exts != "*" and exts != "*.*":
outExt = exts[ 0 ].remove( "*" )
else:
outExt = ".tif"
self.base.enableRun( False )
self.base.setCursor( Qt.WaitCursor )
inDir = self.getInputFileName()
outDir = self.getOutputFileName()
filter = Utils.getRasterExtensions()
workDir = QDir( inDir )
workDir.setFilter( QDir.Files | QDir.NoSymLinks | QDir.NoDotAndDotDot )
workDir.setNameFilters( filter )
files = workDir.entryList()
self.inFiles = []
self.outFiles = []
for f in files:
self.inFiles.append( inDir + "/" + f )
if outDir != None:
outFile = f.replace( QRegExp( "\.[a-zA-Z0-9]{2,4}" ), outExt )
self.outFiles.append( outDir + "/" + outFile )
self.errors = QStringList()
self.batchIndex = 0
self.batchTotal = len( self.inFiles )
self.setProgressRange( self.batchTotal )
self.runItem( self.batchIndex, self.batchTotal )
|
gpl-2.0
| 8,092,497,440,117,212,000
| 39.577703
| 171
| 0.650737
| false
| 3.939324
| false
| false
| false
|
kvirund/codingame
|
medium/network.cabling/solution.py
|
1
|
1412
|
#!/usr/bin/python
# Author: Anton Gorev aka Veei
# Date: 2014-11-06
import sys
n = int(raw_input())
ys = []
sum = 0
first = True
minx = maxx = 0
miny = maxy = 0
for i in xrange(n):
b = [int(a) for a in raw_input().split(" ")]
ys += [b[1]]
sum += b[1]
if first or minx > b[0]:
minx = b[0]
if first or maxx < b[0]:
maxx = b[0]
if first or miny > b[1]:
miny = b[1]
if first or maxy < b[1]:
maxy = b[1]
first = False
def length(ys, y):
return reduce(lambda a, b: a + abs(b - y), ys, 0)
result = y = miny
lmin = length(ys, miny)
lmax = length(ys, maxy)
while miny != maxy:
print >> sys.stderr, miny, maxy
if 1 == maxy - miny:
if lmin < lmax:
maxy = miny
else:
miny = maxy
break
midy = (maxy + miny)/2
lmid = length(ys, midy)
if lmid < lmin and lmid < lmax:
nl = length(ys, midy + 1)
if nl > lmid:
maxy = midy
lmax = lmid
else:
miny = midy
lmin = lmid
elif lmid < lmin and lmid >= lmax:
miny = midy
lmin = lmid
elif lmid >= lmin and lmid < lmax:
lmax = lmid
maxy = midy
else:
print >> sys.stderr, "Broken logic", lmin, lmid, lmax, miny, midy, miny
break
print >> sys.stderr, miny, length(ys, miny)
print length(ys, miny) + maxx - minx
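# Note (illustrative sketch): length(ys, y) = sum(|y_i - y|) is minimised at a
# median of ys, so the search above could be replaced by the closed-form answer:
#
#   ys_sorted = sorted(ys)
#   best_y = ys_sorted[len(ys_sorted) // 2]
#   print length(ys, best_y) + maxx - minx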
|
mit
| 5,430,850,358,133,231,000
| 21.0625
| 79
| 0.5
| false
| 2.757813
| false
| false
| false
|
heldergg/labs
|
lib/hc/draw.py
|
1
|
8228
|
# -*- coding: utf-8 -*-
'''
This module produces SVG files with hemicycles representations.
'''
##
# Imports
##
from pysvg.structure import svg, g, defs, use, title
from pysvg.builders import TransformBuilder, ShapeBuilder
from pysvg.shape import path
from pysvg.style import style
from math import sin, cos, pi, floor
import os.path
from chairs import Hemicycle
##
# Config
##
SVGBASE = '/home/helder/prg/hc/hc/share/hc'
TRANSX = 0
TRANSY = -50
##
# Exceptions
##
class SVGError(Exception):
pass
##
# Utils
##
def degrees(angle):
'''Converts radians to degrees'''
return angle * 180 / pi
##
# SGV
##
class HemicycleSGV(object):
'''
This class creates svg representations of hemicycles.
'''
def __init__(self, hc, parties=None):
'''
hc - hemicycle object
parties - list with the following structure:
            [ { 'initials': '<legend name>',
                'order': <drawing order of the parliamentary group>,
                'result': <number of chairs>,
                'color_1': <chair head color>,
                'color_2': <chair body color>
}, ...
]
'''
self.hc = hc
self.parties = parties
self.chairs = []
# Check if the number of chairs in the results matches the
# calculated hemicycle number of chairs.
nchairs = sum([party['result'] for party in parties])
if nchairs != hc.nchairs:
raise SVGError(
'Results chair number don\'t match the hemicycle size.')
def chair_dist(self):
'''Chair distribution on the hemicycle'''
def smallest(parties, first_row):
            '''Returns the number of chairs for the smallest party in parties'''
remaining = (sum([party['result'] for party in parties]) -
sum([sum(party['seats']) for party in parties]))
smallest_party = parties[0]
dist_seats = sum(smallest_party['seats'])
remaining_seats = smallest_party['result'] - dist_seats
percent = float(remaining_seats) / remaining
nc = int(floor(percent * first_row))
if sum(smallest_party['seats']) == smallest_party['result']:
return 0
return 1 if not nc else nc
def fill_row(parties, seats):
parties.sort(key=lambda party: party['result'])
# Find how many seats we have for each party on this row
for i in range(len(parties)):
party = parties[i]
party_row_seats = smallest(parties[i:], seats)
party['seats'].append(party_row_seats)
seats -= party_row_seats
parties = self.parties
for party in parties:
party['seats'] = []
hc = [row['nchairs'] for row in self.hc.rows()]
for row in hc:
fill_row(parties, row)
parties.sort(key=lambda party: party['order'])
        # Create a hemicycle matrix; each row starts empty and we'll fill
        # the rows afterwards
chairs = []
for i in range(self.hc.nrows):
row = []
for j in range(len(parties)):
party = parties[j]
for seat in range(party['seats'][i]):
row.append(j)
chairs.append(row)
self.chairs = chairs
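    # Example (illustrative sketch, assuming a 2-row hemicycle of 4 and 6 chairs):
    # with parties A (order 0, 3 seats) and B (order 1, 7 seats), fill_row() gives
    # A 1 seat and B 3 seats on the first row, then A 2 and B 4 on the second, so
    # self.chairs would end up as [[0, 1, 1, 1], [0, 0, 1, 1, 1, 1]].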
def svg_dimention(self):
# The SVG coord system origin is on the lower left:
height = self.hc.outer_radius()
width = self.hc.outer_radius() * 2
return width, height
def chair_svg(self, row, column, id_attr):
angle, x, y = self.hc.chair_location(row, column)
width, height = self.svg_dimention()
# This '30' is half the size of the svg chair, should be configured
x = x + width / 2 - 30 * cos(pi / 2 - angle) + TRANSX
y = height - y - 30 * sin(pi / 2 - angle) + TRANSY
# Chair translation and rotation parametrization
th = TransformBuilder()
th.setRotation('%f' % (90 - degrees(angle)))
th.setTranslation('%f,%f' % (x, y))
u = use()
u._attributes['xlink:href'] = '#%s' % id_attr
u.set_transform(th.getTransform())
return u
def chair(self, id_attr, color_1, color_2):
head = ShapeBuilder().createCircle(30, 25, 20, stroke='black', strokewidth=5.0, fill=color_1)
head.set_class('head')
body = path(pathData="M 19.264266,38.267870 C 12.892238,41.659428 9.0221978,48.396703 6.6126745,55.405840 L 51.476471,55.405840 C 49.270169,48.545436 45.682644,41.911786 39.811885,38.267870 C 33.901416,38.010889 26.459633,38.267870 19.264266,38.267870 z ")
body.set_style('stroke-width:5.0;stroke:black;fill:%s;' % color_2)
body.set_class('body')
th = TransformBuilder()
th.setScaling('0.8', '0.8')
group = g()
group.addElement(body)
group.addElement(head)
group.set_id(id_attr)
group.set_transform(th.getTransform())
return group
def defs(self):
d = defs()
for party in self.parties:
d.addElement(self.chair(party['initials'], party['color_1'], party['color_2']))
return d
def svg(self):
if not self.chairs:
raise SVGError('You need to calculate the chair distribution.')
width, height = self.svg_dimention()
# SVG doc
s = svg(height="100%", width="100%")
s.set_viewBox("0 0 %d %d" % (width, height))
t = title()
t.appendTextContent('Parlamento')
s.addElement(t)
# Create the party groups
groups = {}
for i in range(len(self.parties)):
party = self.parties[i]
groups[i] = g()
# groups[i].set_fill(party['color'])
groups[i].set_id('%s_group' % party['initials'])
t = title()
t.appendTextContent('Grupo Parlamentar do %s' % party['initials'])
groups[i].addElement(t)
# Add the chair shape definition
s.addElement(self.defs())
# Distribute the chairs
for row in range(len(self.chairs)):
for col in range(len(self.chairs[row])):
angle, x, y = self.hc.chair_location(row, col)
x = x + width / 2
y = height - y
groups[self.chairs[row][col]].addElement(self.chair_svg(
row, col, self.parties[self.chairs[row][col]]['initials']))
# Insert the party groups into the svg
for i in range(len(self.parties)):
s.addElement(groups[i])
return s.getXML()
if __name__ == '__main__':
# Vote count
parties = [{'initials': 'BE', 'order': 0, 'result': 8, 'image': 'cadeira-BE.svg'},
{'initials': 'CDU', 'order': 1, 'result': 16, 'image': 'cadeira-CDU.svg'},
{'initials': 'PS', 'order': 2, 'result': 74, 'image': 'cadeira-PS.svg'},
{'initials': 'PSD', 'order': 3, 'result': 108, 'image': 'cadeira-PSD.svg'},
{'initials': 'CDS', 'order': 4, 'result': 24, 'image': 'cadeira-CDS.svg'},
]
parties = [
{'name': 'Bloco de Esquerda', 'initials': 'BE',
'order': 0, 'result': 7, 'color_1': 'purple', 'color_2': 'red'},
{'name': 'Coligação Democratica Unitária', 'initials': 'CDU',
'order': 1, 'result': 16, 'color_1': 'red', 'color_2': 'yellow'},
{'name': 'Partido Socialista', 'initials': 'PS',
'order': 2, 'result': 74, 'color_1': 'pink', 'color_2': 'pink'},
{'name': 'Partido Social Democrata', 'initials': 'PSD',
'order': 3, 'result': 109, 'color_1': 'orange', 'color_2': 'orange'},
{'name': 'Centro Democrático Social', 'initials': 'CDS',
'order': 4, 'result': 24, 'color_1': 'blue', 'color_2': 'white'},
]
# Create the hemicycle
hc = Hemicycle(chair_width=60,
chair_height=60,
nchairs=230,
nrows=8,
hangle=(4 / 3) * pi)
# Graphical representation of the hemicycle
hc_svg = HemicycleSGV(hc, parties)
hc_svg.chair_dist()
print hc_svg.svg()
|
gpl-3.0
| -4,524,365,662,919,559,700
| 30.389313
| 264
| 0.544504
| false
| 3.396943
| false
| false
| false
|
DjangoUnchained-CRUD/python
|
tutorial/tutorial/tutorial/urls.py
|
1
|
1193
|
"""tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from rest_framework import routers
from quickstart import views
from django.contrib import admin
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
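# Example (illustrative sketch): the DefaultRouter registrations above expose
# list/detail routes such as
#   /users/        -> UserViewSet list      /users/<pk>/   -> UserViewSet detail
#   /groups/       -> GroupViewSet list     /groups/<pk>/  -> GroupViewSet detail
# plus a browsable API root at / and session login views under /api-auth/.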
|
mit
| -8,288,057,378,176,101,000
| 36.28125
| 82
| 0.720872
| false
| 3.582583
| false
| false
| false
|
bburan/psiexperiment
|
psi/data/io/summarize_abr.py
|
1
|
11625
|
import argparse
from glob import glob
import os.path
import numpy as np
import pandas as pd
from psi.data.io import abr
columns = ['frequency', 'level', 'polarity']
def process_folder(folder, filter_settings=None):
glob_pattern = os.path.join(folder, '*abr*')
filenames = glob(glob_pattern)
process_files(filenames, filter_settings=filter_settings)
def process_files(filenames, offset=-0.001, duration=0.01,
filter_settings=None, reprocess=False):
for filename in filenames:
try:
processed = process_file(filename, offset, duration,
filter_settings, reprocess)
if processed:
print(f'\nProcessed {filename}\n')
else:
print('*', end='', flush=True)
except Exception as e:
            print(f'\nError processing {filename}\n{e}\n')
            raise
def _get_file_template(fh, offset, duration, filter_settings, suffix=None):
base_string = f'ABR {offset*1e3:.1f}ms to {(offset+duration)*1e3:.1f}ms'
if filter_settings == 'saved':
settings = _get_filter(fh)
if not settings['digital_filter']:
filter_string = None
else:
lb = settings['lb']
ub = settings['ub']
filter_string = f'{lb:.0f}Hz to {ub:.0f}Hz filter'
elif filter_settings is None:
filter_string = None
else:
lb = filter_settings['lb']
ub = filter_settings['ub']
filter_string = f'{lb:.0f}Hz to {ub:.0f}Hz filter'
order = filter_settings['order']
if order != 1:
filter_string = f'{order:.0f} order {filter_string}'
if filter_string is None:
file_string = f'{base_string}'
else:
file_string = f'{base_string} with {filter_string}'
if suffix is not None:
file_string = f'{file_string} {suffix}'
print(file_string)
return f'{file_string} {{}}.csv'
def _get_filter(fh):
if not isinstance(fh, (abr.ABRFile, abr.ABRSupersetFile)):
fh = abr.load(fh)
return {
'digital_filter': fh.get_setting_default('digital_filter', True),
'lb': fh.get_setting_default('digital_highpass', 300),
'ub': fh.get_setting_default('digital_lowpass', 3000),
# Filter order is not currently an option in the psiexperiment ABR
# program so it defaults to 1.
'order': 1,
}
def _get_epochs(fh, offset, duration, filter_settings, reject_ratio=None):
# We need to do the rejects in this code so that we can obtain the
# information for generating the CSV files. Set reject_threshold to np.inf
# to ensure that nothing gets rejected.
kwargs = {'offset': offset, 'duration': duration, 'columns': columns,
'reject_threshold': np.inf}
if filter_settings is None:
return fh.get_epochs(**kwargs)
if filter_settings == 'saved':
settings = _get_filter(fh)
if not settings['digital_filter']:
return fh.get_epochs(**kwargs)
lb = settings['lb']
ub = settings['ub']
order = settings['order']
kwargs.update({'filter_lb': lb, 'filter_ub': ub, 'filter_order': order})
return fh.get_epochs_filtered(**kwargs)
lb = filter_settings['lb']
ub = filter_settings['ub']
order = filter_settings['order']
kwargs.update({'filter_lb': lb, 'filter_ub': ub, 'filter_order': order})
return fh.get_epochs_filtered(**kwargs)
def _match_epochs(*epochs):
def _match_n(df):
grouping = df.groupby(['dataset', 'polarity'])
n = grouping.size().unstack()
if len(n) < 2:
return None
n = n.values.ravel().min()
return pd.concat([g.iloc[:n] for _, g in grouping])
epochs = pd.concat(epochs, keys=range(len(epochs)), names=['dataset'])
matched = epochs.groupby(['frequency', 'level']).apply(_match_n)
return [d.reset_index('dataset', drop=True) for _, d in \
matched.groupby('dataset', group_keys=False)]
def is_processed(filename, offset, duration, filter_settings, suffix=None):
t = _get_file_template(filename, offset, duration, filter_settings, suffix)
file_template = os.path.join(filename, t)
raw_epoch_file = file_template.format('individual waveforms')
mean_epoch_file = file_template.format('average waveforms')
n_epoch_file = file_template.format('number of epochs')
return os.path.exists(raw_epoch_file) and \
os.path.exists(mean_epoch_file) and \
os.path.exists(n_epoch_file)
def process_files_matched(filenames, offset, duration, filter_settings,
reprocess=True, suffix=None):
epochs = []
for filename in filenames:
fh = abr.load(filename)
if len(fh.erp_metadata) == 0:
raise IOError('No data in file')
e = _get_epochs(fh, offset, duration, filter_settings)
epochs.append(e)
epochs = _match_epochs(*epochs)
for filename, e in zip(filenames, epochs):
# Generate the filenames
t = _get_file_template(fh, offset, duration, filter_settings, suffix)
file_template = os.path.join(filename, t)
raw_epoch_file = file_template.format('individual waveforms')
mean_epoch_file = file_template.format('average waveforms')
n_epoch_file = file_template.format('number of epochs')
# Check to see if all of them exist before reprocessing
if not reprocess and \
(os.path.exists(raw_epoch_file) and \
os.path.exists(mean_epoch_file) and \
os.path.exists(n_epoch_file)):
continue
epoch_n = e.groupby(columns[:-1]).size()
epoch_mean = e.groupby(columns).mean().groupby(columns[:-1]).mean()
# Write the data to CSV files
epoch_n.to_csv(n_epoch_file, header=True)
epoch_mean.columns.name = 'time'
epoch_mean.T.to_csv(mean_epoch_file)
e.columns.name = 'time'
e.T.to_csv(raw_epoch_file)
def process_file(filename, offset, duration, filter_settings, reprocess=False,
n_epochs='auto', suffix=None):
'''
Extract ABR epochs, filter and save result to CSV files
Parameters
----------
filename : path
Path to ABR experiment. If it's a set of ABR experiments, epochs across
all experiments will be combined for the analysis.
offset : sec
The start of the epoch to extract, in seconds, relative to tone pip
onset. Negative values can be used to extract a prestimulus baseline.
duration: sec
The duration of the epoch to extract, in seconds, relative to the
offset. If offset is set to -0.001 sec and duration is set to 0.01 sec,
then the epoch will be extracted from -0.001 to 0.009 sec re tone pip
onset.
filter_settings : {None, 'saved', dict}
If None, no additional filtering is done. If 'saved', uses the digital
filter settings that were saved in the ABR file. If a dictionary, must
contain 'lb' (the lower bound of the passband in Hz) and 'ub' (the
upper bound of the passband in Hz).
reprocess : bool
If True, reprocess the file even if it already has been processed for
the specified filter settings.
n_epochs : {None, 'auto', int, dict}
If None, all epochs will be used. If 'auto', use the value defined at
acquisition time. If integer, will limit the number of epochs per
frequency and level to this number. If dict, the key must be a tuple of
(frequency, level) and the value will indicate the number of epochs to
use.
suffix : {None, str}
Suffix to use when creating save filenames.
'''
fh = abr.load(filename)
if len(fh.erp_metadata) == 0:
raise IOError('No data in file')
# Generate the filenames
t = _get_file_template(fh, offset, duration, filter_settings, suffix)
file_template = os.path.join(filename, t)
raw_epoch_file = file_template.format('individual waveforms')
mean_epoch_file = file_template.format('average waveforms')
n_epoch_file = file_template.format('number of epochs')
reject_ratio_file = file_template.format('reject ratio')
# Check to see if all of them exist before reprocessing
if not reprocess and \
(os.path.exists(raw_epoch_file) and \
os.path.exists(mean_epoch_file) and \
os.path.exists(n_epoch_file) and \
os.path.exists(reject_ratio_file)):
return False
# Load the epochs
epochs = _get_epochs(fh, offset, duration, filter_settings)
# Apply the reject
reject_threshold = fh.get_setting('reject_threshold')
m = np.abs(epochs) < reject_threshold
m = m.all(axis=1)
epochs = epochs.loc[m]
if n_epochs is not None:
if n_epochs == 'auto':
n_epochs = fh.get_setting('averages')
n = int(np.floor(n_epochs / 2))
epochs = epochs.groupby(columns) \
.apply(lambda x: x.iloc[:n])
epoch_reject_ratio = 1-m.groupby(columns[:-1]).mean()
epoch_mean = epochs.groupby(columns).mean() \
.groupby(columns[:-1]).mean()
# Write the data to CSV files
epoch_reject_ratio.name = 'epoch_reject_ratio'
epoch_reject_ratio.to_csv(reject_ratio_file, header=True)
    epoch_n = epochs.groupby(columns[:-1]).size()
    epoch_n.name = 'epoch_n'
    epoch_n.to_csv(n_epoch_file, header=True)
epoch_mean.columns.name = 'time'
epoch_mean.T.to_csv(mean_epoch_file)
epochs.columns.name = 'time'
epochs.T.to_csv(raw_epoch_file)
return True
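# Example (illustrative sketch; the folder name is a placeholder):
#
#   process_file('example ABR experiment',
#                offset=-0.001, duration=0.01,   # -1 ms to 9 ms re tone-pip onset
#                filter_settings={'lb': 300, 'ub': 3000, 'order': 1})
#   # writes 'ABR -1.0ms to 9.0ms with 300Hz to 3000Hz filter ... .csv' files
#   # (individual waveforms, average waveforms, number of epochs, reject ratio)
#   # inside the experiment folder.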
def main_auto():
parser = argparse.ArgumentParser('Filter and summarize ABR files in folder')
parser.add_argument('folder', type=str, help='Folder containing ABR data')
args = parser.parse_args()
process_folder(args.folder, filter_settings='saved')
def main():
parser = argparse.ArgumentParser('Filter and summarize ABR data')
parser.add_argument('filenames', type=str,
help='Filename', nargs='+')
parser.add_argument('--offset', type=float,
help='Epoch offset',
default=-0.001)
parser.add_argument('--duration', type=float,
help='Epoch duration',
default=0.01)
parser.add_argument('--filter-lb', type=float,
help='Highpass filter cutoff',
default=None)
parser.add_argument('--filter-ub', type=float,
help='Lowpass filter cutoff',
default=None)
parser.add_argument('--order', type=float,
help='Filter order',
default=None)
parser.add_argument('--reprocess',
help='Redo existing results',
action='store_true')
args = parser.parse_args()
if args.filter_lb is not None or args.filter_ub is not None:
filter_settings = {
'lb': args.filter_lb,
'ub': args.filter_ub,
'order': args.order,
}
else:
filter_settings = None
process_files(args.filenames, args.offset, args.duration, filter_settings,
args.reprocess)
def main_gui():
import enaml
from enaml.qt.qt_application import QtApplication
with enaml.imports():
from .summarize_abr_gui import SummarizeABRGui
app = QtApplication()
view = SummarizeABRGui()
view.show()
app.start()
if __name__ == '__main__':
main_gui()
|
mit
| -145,646,216,243,829,300
| 36.140575
| 80
| 0.608516
| false
| 3.800262
| false
| false
| false
|
shootsoft/practice
|
lintcode/NineChapters/06/add-two-numbers.py
|
1
|
1272
|
__author__ = 'yinjun'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param l1: the first list
# @param l2: the second list
# @return: the sum list of l1 and l2
def addLists(self, l1, l2):
# write your code here
h = ListNode(0)
l = h
add = 0
while l1!=None and l2!=None:
l.next = ListNode(l1.val + l2.val + add)
if l.next.val >= 10:
add = 1
l.next.val -=10
else:
add = 0
l = l.next
l1 = l1.next
l2 = l2.next
while l1 != None:
l.next = ListNode(l1.val + add)
if l.next.val >= 10:
add = 1
l.next.val -= 10
else:
add = 0
l = l.next
l1 = l1.next
while l2 != None:
l.next = ListNode(l2.val + add)
if l.next.val >= 10:
add = 1
l.next.val -= 10
else:
add = 0
l = l.next
l2 = l2.next
if add > 0:
l.next = ListNode(add)
return h.next
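# Example (illustrative sketch): digits are stored in reverse order, so
# 342 is 2 -> 4 -> 3 and 465 is 5 -> 6 -> 4; addLists returns 7 -> 0 -> 8,
# i.e. 807 with its digits reversed.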
|
apache-2.0
| -7,880,751,064,825,004,000
| 23.461538
| 52
| 0.400943
| false
| 3.494505
| false
| false
| false
|
GoogleCloudPlatform/cloud-ops-sandbox
|
src/loadgenerator/sre_recipe_utils.py
|
1
|
6473
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
This module contains code for integrating SRE Recipes with LoadGen
"""
import time
import gevent
from flask import request
from flask import jsonify
from flask import make_response
from functools import wraps
from locust.env import Environment
from locust_tasks import get_sre_recipe_user_class
def return_as_json_response(fn):
"""
Python helper decorator for returning status code and JSON responses from
a Flask request handler.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
try:
body = fn(*args, **kwargs)
resp = make_response(jsonify(body), 200)
resp.headers["Content-Type"] = 'application/json'
return resp
except LookupError as e:
resp = make_response(jsonify({"err": str(e)}), 404)
resp.headers["Content-Type"] = 'application/json'
return resp
except ValueError as e:
resp = make_response(jsonify({"err": str(e)}), 400)
resp.headers["Content-Type"] = 'application/json'
return resp
except Exception as e:
resp = make_response(jsonify({"err": str(e)}), 500)
resp.headers["Content-Type"] = 'application/json'
return resp
return wrapper
def init_sre_recipe_api(env):
"""
Attach custom Flask request handlers to a locust environment's flask app
"""
if env and env.web_ui:
@env.web_ui.app.route("/api/ping")
@return_as_json_response
def ping():
return {"msg": "pong"}
@env.web_ui.app.route("/api/user_count")
@return_as_json_response
def user_count():
"""
            Return the total number of users spawned for load generation.
Response:
- user_count: int
"""
return {"user_count": env.runner.user_count}
@env.web_ui.app.route("/api/spawn/<user_identifier>", methods=['POST'])
@return_as_json_response
def spawn_by_user_identifier(user_identifier=None):
"""
            Spawn a number of users with the SRE Recipe user identifier.
            Form Parameters:
- user_count: Required. The total number of users to spawn
- spawn_rate: Required. The spawn rate for the users.
- stop_after: Optional. If specified, run the load generation only
for the given number of seconds.
Response:
On success, returns status code 200 and an acknowledgement 'msg'
On error, returns status code 400 for invalid arguments, and 404
if load pattern for 'user_identifier' is not found, as well as an
'err' message.
"""
# Required Query Parameters
user_count = request.form.get("user_count", default=None, type=int)
spawn_rate = request.form.get("spawn_rate", default=None, type=int)
# The function returns None, if user_identifier is not found
user_class = get_sre_recipe_user_class(user_identifier)
if user_count is None:
raise ValueError(f"Must specify a valid, non-empty, integer value for query parameter 'user_count': {request.form.get('user_count', default=None)}")
elif spawn_rate is None:
raise ValueError(f"Must specify a valid, non-empty, integer value for query parameter 'spawn_rate': {request.form.get('spawn_rate', default=None)}")
elif user_count <= 0:
raise ValueError(f"Query parameter 'user_count' must be positive: {user_count}")
elif spawn_rate <= 0:
raise ValueError(f"Query parameter 'spawn_rate' must be positive: {spawn_rate}")
elif user_class is None:
raise LookupError(f"Cannot find SRE Recipe Load for: {user_identifier}")
# Optional Query Parameters
stop_after = request.form.get("stop_after", default=None, type=int)
if stop_after is not None and stop_after <= 0:
raise ValueError(f"Query parameter 'stop_after' must be positive: {stop_after}")
elif stop_after is None and "stop_after" in request.form:
raise ValueError(f"stop_after must be valid integer value: {request.form['stop_after']}")
# We currently only support running one SRE Recipe load each time
# for implementation simplicity.
if env.runner.user_count > 0:
env.runner.quit() # stop existing load generating users, if any
env.user_classes = [user_class] # replace with the new users
def spawn_when_all_users_stopped():
# Wait at most 10 seconds until all existing users are stopped, then
# start generating new load with the new user types
tries = 0
while tries < 10:
if env.runner.user_count == 0:
env.runner.start(user_count, spawn_rate)
break
tries += 1
time.sleep(1)
# Start anyway.
if tries == 10:
env.runner.start(user_count, spawn_rate)
# Stop later if applicable
if stop_after:
gevent.spawn_later(stop_after,
lambda: env.runner.quit())
            gevent.spawn(spawn_when_all_users_stopped)
return {"msg": f"Spawn Request Received: spawning {user_count} users at {spawn_rate} users/second"}
@env.web_ui.app.route("/api/stop", methods=['POST'])
@return_as_json_response
def stop_all():
"""Stop all currently running users"""
env.runner.quit()
return {"msg": "All users stopped"}
|
apache-2.0
| 5,255,987,713,217,035,000
| 41.032468
| 164
| 0.591534
| false
| 4.347213
| false
| false
| false
|
suutari-ai/shoop
|
shuup/front/__init__.py
|
2
|
2946
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
from shuup.apps.settings import validate_templates_configuration
class ShuupFrontAppConfig(AppConfig):
name = "shuup.front"
verbose_name = "Shuup Frontend"
label = "shuup_front"
provides = {
"admin_category_form_part": [
"shuup.front.admin_module.sorts_and_filters.form_parts.ConfigurationCategoryFormPart"
],
"admin_module": [
"shuup.front.admin_module.CartAdminModule",
],
"admin_shop_form_part": [
"shuup.front.admin_module.sorts_and_filters.form_parts.ConfigurationShopFormPart"
],
"notify_event": [
"shuup.front.notify_events:OrderReceived",
"shuup.front.notify_events:ShipmentCreated",
"shuup.front.notify_events:ShipmentDeleted",
"shuup.front.notify_events:PaymentCreated",
"shuup.front.notify_events:RefundCreated",
],
"notify_script_template": [
"shuup.front.notify_script_templates:PaymentCreatedEmailScriptTemplate",
"shuup.front.notify_script_templates:RefundCreatedEmailScriptTemplate",
"shuup.front.notify_script_templates:ShipmentDeletedEmailScriptTemplate",
"shuup.front.notify_script_templates:OrderConfirmationEmailScriptTemplate",
"shuup.front.notify_script_templates:ShipmentCreatedEmailScriptTemplate",
],
"front_extend_product_list_form": [
"shuup.front.forms.product_list_modifiers.CategoryProductListFilter",
"shuup.front.forms.product_list_modifiers.LimitProductListPageSize",
"shuup.front.forms.product_list_modifiers.ProductPriceFilter",
"shuup.front.forms.product_list_modifiers.ProductVariationFilter",
"shuup.front.forms.product_list_modifiers.SortProductListByCreatedDate",
"shuup.front.forms.product_list_modifiers.SortProductListByAscendingCreatedDate",
"shuup.front.forms.product_list_modifiers.SortProductListByName",
"shuup.front.forms.product_list_modifiers.SortProductListByPrice",
"shuup.front.forms.product_list_modifiers.ManufacturerProductListFilter",
],
"front_product_order_form": [
"shuup.front.forms.order_forms:VariableVariationProductOrderForm",
"shuup.front.forms.order_forms:SimpleVariationProductOrderForm",
"shuup.front.forms.order_forms:SimpleProductOrderForm",
],
}
def ready(self):
# connect signals
import shuup.front.notify_events # noqa: F401
validate_templates_configuration()
default_app_config = "shuup.front.ShuupFrontAppConfig"
|
agpl-3.0
| 1,166,574,447,828,370,000
| 43.636364
| 97
| 0.682281
| false
| 3.668742
| true
| false
| false
|
syl20bnr/i3ci
|
_deprecated/scripts/menu/i3_actions.py
|
1
|
17882
|
#!/usr/bin/env python
# author: syl20bnr (2013)
# goal: i3 actions module.
import os
from subprocess import Popen, PIPE
import i3
from Xlib import display
import i3ci_menu
from constants import DMENU_MAX_ROW, DMENU_FONT, DMENU_HEIGHT
from feeders import (cur_workspace,
cur_workspaces,
free_workspaces,
cur_output)
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
# DMENU = os.path.normpath(os.path.join(MODULE_PATH, '../../bin/i3ci_menu'))
class Action(object):
''' Define an i3-msg action. '''
def __init__(self):
self._actions = []
def add_action(self, action, args=None):
if args:
action = action.__call__(self, *args)
else:
action = action.__call__(self)
self._actions.append(action)
def get_command(self):
return 'i3-msg ' + ','.join(self._actions)
def process(self):
Popen(self.get_command(), shell=True)
def exec_(self, app):
return 'exec {0}'.format(app)
def set_mode(self, mode):
''' Set the specified mode '''
return 'mode {0}'.format(mode)
def set_default_mode(self):
''' Set the default mode '''
return self.set_mode('default')
def jump_to_window(self, window):
''' Jump to the specified window. '''
return '[con_id={0}] focus'.format(window)
def jump_to_workspace(self, workspace):
''' Jump to the given workspace.
Current used workspaces are prefixed with a dot '.'
Workspace '`' means "back_and_forth" command.
Workspace '=' is the scratch pad
'''
if workspace == '`':
return "workspace back_and_forth"
elif workspace == '=':
return "scratchpad show"
else:
return "workspace {0}".format(workspace)
def send_window_to_output(self, output):
''' Send the current window to the specified output. '''
return "move to output {0}".format(output)
def send_workspace_to_output(self, output):
''' Send the current workspace to the specified output. '''
return "move workspace to output {0}".format(output)
def send_window_to_workspace(self, workspace):
''' Send the current window to the passed workspace. '''
if workspace == '`':
return "move workspace back_and_forth"
elif workspace == '=':
return "move scratchpad"
else:
return "move workspace {0}".format(workspace)
def focus_output(self, output):
''' Focus the specified output. '''
return "focus output {0}".format(output)
def focus_window(self, id_):
''' Focus the specified output. '''
return "[con_id={0}] focus".format(id_)
def mark_window(self, id_, mark):
''' Set the passed mark to the window with the passed id_. '''
return '[con_id={0}] mark {1}'.format(id_, mark)
def unmark_window(self, mark):
''' Disable the passed mark. '''
return 'unmark {0}'.format(mark)
def rename_workspace(self, from_, to):
''' Rename the workspace '''
return '\'rename workspace "{0}" to "{1}"\''.format(from_, to)
def cmd(self, cmd):
# wonderful method :-)
return cmd
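# Example (illustrative sketch of how Action chains i3-msg commands):
#
#   action = Action()
#   action.add_action(Action.jump_to_workspace, ('3',))
#   action.add_action(Action.exec_, ('firefox',))
#   action.get_command()  # -> 'i3-msg workspace 3,exec firefox'
#   action.process()      # runs the command through a shell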
# ----------------------------------------------------------------------------
# Action groups
# ----------------------------------------------------------------------------
def default_mode(action=None):
''' Add or perform an action to set the default mode. '''
if action:
action.add_action(Action.set_default_mode)
else:
action = Action()
action.add_action(Action.set_default_mode)
action.process()
def get_max_row(rcount):
return max([0, min([DMENU_MAX_ROW, rcount])])
def launch_app(feeder, app=None, output='all', free=False):
''' Launch an application on the specified monitor.
output='all' means the current workspace on the current monitor.
If free is true then the application is opened in a new workspace.
'''
reply = app
if not reply:
input_ = feeder.feed().encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(lmax=size,
f=DMENU_FONT,
h=DMENU_HEIGHT)
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
if '-cd' in reply:
# MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
# DMENU = os.path.normpath(os.path.join(MODULE_PATH,
# '../../bin/i3ci_menu'))
xcwd = Popen('xcwd', stdin=PIPE, stdout=PIPE).communicate()[0]
reply = '"' + reply + ' ' + xcwd + '"'
if not free and (output == 'all' or
output == cur_output.get_current_output()):
# open on the current workspace
action = Action()
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
if not free and (output != 'all' and
output != cur_output.get_current_output()):
# open on the visible workspace on another output
otherw = cur_workspace.feed(output)
action = Action()
action.add_action(Action.jump_to_workspace, (otherw,))
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
elif free and (output == 'all' or
output == cur_output.get_current_output()):
# free workspace on the current output
freew = free_workspaces.get_free_workspaces()[0]
action = Action()
action.add_action(Action.jump_to_workspace, (freew,))
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
elif free and (output != 'all' and
output != cur_output.get_current_output()):
# free workspace on another output
freew = free_workspaces.get_free_workspaces()[0]
action = Action()
action.add_action(Action.focus_output, (output,))
action.add_action(Action.jump_to_workspace, (freew,))
action.add_action(Action.exec_, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def clone_window(output='all', free=False):
from feeders import cur_window
win = cur_window.get_current_window()[0]
dpy = display.Display()
xwin = dpy.create_resource_object('window', win['window'])
inst, _ = xwin.get_wm_class()
if inst:
if inst == 'urxvt':
inst += ' -cd'
launch_app(None, inst, output, free)
def jump_to_window(feeder, inst, output='all'):
''' Jump to the window chosen by the user using i3ci_menu. '''
(wins, d) = feeder.feed(inst, output)
size = get_max_row(len(wins))
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=size,
sb='#b58900')
reply = proc.communicate('\n'.join(wins).encode('utf-8'))[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.jump_to_window, (d.get(reply),))
default_mode(action)
action.process()
else:
default_mode()
def jump_to_workspace(feeder):
''' Jump to the workspace chosen by the user using i3ci_menu. '''
input_ = '\n'.join(feeder.feed()).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#d33682')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.jump_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def jump_to_currently_used_workspace(feeder, output='all'):
''' Jump to a curently used workspace on the specified outputs
and chosen by the user using i3ci_menu.
'''
input_ = '\n'.join(feeder.feed(output)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#268bd2')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.jump_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def send_workspace_to_output(feeder, output='all'):
''' Send the current workspace to the selected output. '''
if output == 'all':
# be sure that the workspace exists
cur_wks = cur_workspace.get_current_workspace()
if not cur_wks:
return
outs = feeder.get_outputs_dictionary()
# remove the current output
coutput = cur_output.get_current_output()
fouts = [k for k, v in outs.iteritems() if v != coutput]
input_ = '\n'.join(sorted(fouts)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=False,
sb='#268bd2')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
output = outs[reply]
action = Action()
action.add_action(Action.send_workspace_to_output, (output,))
default_mode(action)
action.process()
def send_window_to_output(feeder, output='all'):
''' Send the current window to the selected output. '''
if output == 'all':
outs = feeder.get_outputs_dictionary()
# remove the current output
coutput = cur_output.get_current_output()
fouts = [k for k, v in outs.iteritems() if v != coutput]
input_ = '\n'.join(sorted(fouts)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=False,
sb='#268bd2')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
output = outs[reply]
action = Action()
action.add_action(Action.send_window_to_output, (output,))
action.add_action(Action.focus_output, (output,))
default_mode(action)
action.process()
def send_window_to_workspace(feeder):
''' Send the current window to the selected workspace. '''
input_ = '\n'.join(feeder.feed()).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#6c71c4')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.send_window_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def send_window_to_free_workspace(feeder, output):
''' Send the current window to a free workspace on the given output. '''
freew = feeder.feed()
if freew:
from feeders import cur_output
w = freew[0]
action = Action()
action.add_action(Action.send_window_to_workspace, (w,))
action.add_action(Action.jump_to_workspace, (w,))
if output != 'all' and output != cur_output.feed():
action.add_action(Action.send_workspace_to_output, (output,))
default_mode(action)
action.process()
else:
default_mode()
def send_window_to_used_workspace(feeder, output):
''' Send the current window to a used workspace on the given output. '''
input_ = '\n'.join(feeder.feed(output)).encode('utf-8')
size = get_max_row(len(input_))
proc = i3ci_menu.call(f=DMENU_FONT,
h=DMENU_HEIGHT,
lmax=size,
r=True,
sb='#6c71c4')
reply = proc.communicate(input_)[0]
if reply:
reply = reply.decode('utf-8')
action = Action()
action.add_action(Action.send_window_to_workspace, (reply,))
action.add_action(Action.jump_to_workspace, (reply,))
default_mode(action)
action.process()
else:
default_mode()
def _choose_other_windows(feeder, output):
    '''
    Launch an i3ci_menu instance to select a window which is not on the
    current workspace.
    Return a tuple composed of the window name and the window id.
    Return (None, None) if nothing has been selected.
    '''
(wins, d) = feeder.feed(output=output)
size = get_max_row(len(wins))
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=size,
sb='#6c71c4')
ws = cur_workspace.feed()
excluded_wins = _get_window_ids_of_workspace(ws)
if excluded_wins:
        # remove the windows of the current workspace from the list
wins = [k for k, v in d.iteritems() if v not in excluded_wins]
reply = proc.communicate('\n'.join(wins).encode('utf-8'))[0]
if reply:
reply = reply.decode('utf-8')
return reply, d.get(reply)
else:
return None, None
def send_window_to_win_workspace(feeder, output='all'):
''' Send the current window to the workspace of the selected window. '''
win, win_id = _choose_other_windows(feeder, output)
if win:
ws = _get_window_workspace(win_id)
action = Action()
action.add_action(Action.send_window_to_workspace, (ws,))
action.add_action(Action.jump_to_workspace, (ws,))
default_mode(action)
action.process()
else:
default_mode()
def bring_window(feeder, output='all'):
''' Bring the chosen window to the current workspace. '''
win, win_id = _choose_other_windows(feeder, output)
if win:
# TODO
ws = cur_workspace.feed()
other_ws = _get_window_workspace(win_id)
action = Action()
# switch focus to the window to bring
action.add_action(Action.jump_to_workspace, (other_ws,))
action.focus_window(win_id)
# send the window to the original workspace
action.add_action(Action.send_window_to_workspace, (ws,))
action.add_action(Action.jump_to_workspace, (ws,))
# make sure the new window is focused at the end
action.focus_window(win_id)
# print action.get_command()
default_mode(action)
action.process()
else:
default_mode()
def focus_workspace(mon):
wks = cur_workspace.feed(mon)
action = Action()
action.add_action(Action.jump_to_workspace, (wks,))
default_mode(action)
action.process()
def focus_nth_window(nth, ws=None):
    ''' Roughly focus the nth window in the hierarchy (limited to the first 10). '''
wins = _get_windows_from_workspace(ws)
action = Action()
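    # nth == 0 selects the 10th window, so 1-9 followed by 0 cover the first ten.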
if nth == 0:
nth = 10
action.add_action(Action.focus_window, (wins[nth-1],))
action.process()
def logout():
from feeders import logout as logout_feeder
from feeders import confirm
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=4,
nb='#002b36',
nf='#eee8dc',
sb='#cb4b16',
sf='#eee8d5')
reply = proc.communicate(
'\n'.join(logout_feeder.feed()).encode('utf-8'))[0]
if reply:
action = Action()
action.add_action(Action.set_mode, ("confirm {0} ?".format(reply),))
action.process()
proc = i3ci_menu.call(f=DMENU_FONT,
lmax=4,
nb='#002b36',
nf='#eee8dc',
sb='#cb4b16',
sf='#eee8d5')
conf = proc.communicate('\n'.join(confirm.feed()).encode('utf-8'))[0]
if conf == 'OK':
action = Action()
default_mode(action)
action.process()
exec_ = os.path.join(MODULE_PATH, 'i3-exit')
cmd = '{0} --{1}'.format(exec_, reply)
Popen(cmd, shell=True)
return
default_mode()
def execute_cmd(feeder, prefix):
''' Execute: i3-msg prefix *user_choice* '''
proc = i3ci_menu.call(p=feeder.get_prompt(prefix),
f=DMENU_FONT,
h=DMENU_HEIGHT,
sb='#cb4b16')
reply = proc.communicate('\n'.join(feeder.feed(prefix)).encode('utf-8'))[0]
if reply:
reply = reply.decode('utf-8')
cmd = reply
if prefix:
cmd = prefix + ' ' + cmd
action = Action()
action.add_action(Action.cmd, (cmd,))
action.process()
def _get_window_workspace(win_id):
cworkspaces = cur_workspaces.get_cur_workspaces()
for ws in cworkspaces:
ws_tree = i3.filter(name=ws)
if i3.filter(tree=ws_tree, id=win_id):
return ws
return None
def _get_window_ids_of_workspace(ws):
res = []
wks = i3.filter(name=ws)
wins = i3.filter(tree=wks, nodes=[])
for w in wins:
res.append(w['id'])
return res
def _get_windows_from_workspace(ws):
res = []
if ws is None:
ws = cur_workspace.feed()
workspace = i3.filter(name=ws)
if workspace:
workspace = workspace[0]
windows = i3.filter(workspace, nodes=[])
for window in windows:
res.append(window['id'])
return res
|
mit
| 1,454,420,463,426,443,000
| 32.612782
| 79
| 0.552455
| false
| 3.733974
| false
| false
| false
|
ryanpetrello/draughtcraft
|
draughtcraft/tests/selenium/recipes/test_builder.py
|
1
|
23046
|
import time
from selenium.webdriver.support.ui import Select
from draughtcraft import model
from draughtcraft.tests.selenium import TestSeleniumApp
class TestAllGrainBuilder(TestSeleniumApp):
def setUp(self):
super(TestAllGrainBuilder, self).setUp()
model.Style(
name='American IPA',
min_og=1.056,
max_og=1.075,
min_fg=1.01,
max_fg=1.018,
min_ibu=40,
max_ibu=70,
min_srm=6,
max_srm=15,
min_abv=.055,
max_abv=.075,
category_number=14,
style_letter='B'
)
model.Style(
name='Spice, Herb, or Vegetable Beer',
category_number=21,
style_letter='A'
)
model.commit()
self.get("/")
self.b.find_element_by_link_text("Create Your Own Recipe").click()
time.sleep(.1)
self.b.find_element_by_id("name").clear()
self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
Select(
self.b.find_element_by_id("type")
).select_by_visible_text("All Grain")
self.b.find_element_by_css_selector("button.ribbon").click()
@property
def b(self):
return self.browser
def blur(self):
self.b.find_element_by_css_selector(".logo").click()
def test_defaults(self):
self.wait.until(
lambda driver:
self.b.find_element_by_name("name").get_attribute("value") ==
"Rocky Mountain River IPA"
)
self.assertEqual(
"DraughtCraft - Rocky Mountain River IPA",
self.b.title
)
self.assertEqual(
"5",
self.b.find_element_by_name("volume").get_attribute("value")
)
assert self.b.find_element_by_css_selector('.step.mash') is not None
assert self.b.find_element_by_css_selector('.step.boil') is not None
assert self.b.find_element_by_css_selector('.step.ferment') \
is not None
def test_name_change_save(self):
self.b.find_element_by_name("name").send_keys("!")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("name").get_attribute("value") ==
"Rocky Mountain River IPA!"
)
def test_name_change_updates_page_title(self):
self.b.find_element_by_name("name").send_keys("!")
self.blur()
assert self.b.title == 'DraughtCraft - Rocky Mountain River IPA!'
def test_style_choose(self):
self.b.find_element_by_link_text("No Style Specified").click()
self.b.find_element_by_link_text("American IPA").click()
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_css_selector(".selectBox-label").text ==
"American IPA"
)
self.b.find_element_by_link_text("American IPA").click()
self.b.find_element_by_link_text("No Style Specified").click()
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_css_selector(".selectBox-label").text ==
"No Style Specified"
)
def test_volume_change_save(self):
self.b.find_element_by_name("volume").clear()
self.b.find_element_by_name("volume").send_keys("10")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("volume").get_attribute("value") ==
"10"
)
def test_notes_change_save(self):
self.b.find_element_by_css_selector('.notes textarea').send_keys("ABC")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_css_selector('.notes textarea')
.get_attribute("value") == "ABC"
)
def test_remove_addition(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
time.sleep(2)
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
self.b.find_element_by_css_selector(
'.%s .ingredient-list .addition .close a' % step.lower()
).click()
time.sleep(2)
self.b.refresh()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
def test_add_malt(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_add_extract(self):
model.Fermentable(
name="Cooper's Amber LME",
type='EXTRACT',
origin='AUSTRALIAN',
ppg=36,
lovibond=13.3,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
self.b.find_element_by_link_text("Add Malt Extract...").click()
self.b.find_element_by_link_text(
"Cooper's Amber LME (Australian)"
).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_add_hop(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_add_extra(self):
model.Extra(
name="Whirlfloc Tablet",
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition' % step.lower()
)) == 0
self.b.find_element_by_link_text("Add Misc...").click()
self.b.find_element_by_link_text("Whirlfloc Tablet").click()
assert len(self.b.find_elements_by_css_selector(
'.%s .ingredient-list .addition:not(:empty)' % step.lower()
)) == 1
def test_mash_method_change(self):
Select(
self.b.find_element_by_name('mash_method')
).select_by_visible_text("Multi-Step")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("mash_method").
get_attribute("value") == "MULTISTEP"
)
def test_mash_instructions_change(self):
self.b.find_element_by_name('mash_instructions').clear()
self.b.find_element_by_name('mash_instructions').send_keys(
'Testing 1 2 3'
)
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("mash_instructions").
get_attribute("value") == "Testing 1 2 3"
)
def test_boil_minutes(self):
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_name('boil_minutes').clear()
self.b.find_element_by_name('boil_minutes').send_keys('90')
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("boil_minutes").
get_attribute("value") == "90"
)
def test_fermentation_schedule_change(self):
self.b.find_element_by_link_text('Ferment').click()
self.b.find_element_by_link_text("Add...").click()
self.b.find_element_by_link_text("Add...").click()
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, el in enumerate(days):
Select(el).select_by_visible_text(str(14 + (7 * i)))
for j, el in enumerate(temps):
Select(el).select_by_visible_text(str(68 + (2 * j)))
self.blur()
time.sleep(2)
self.b.refresh()
time.sleep(1)
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, d in enumerate(days):
assert d.get_attribute('value') == str(14 + (7 * i))
for j, t in enumerate(temps):
assert t.get_attribute('value') == str(68 + (2 * j))
def test_change_fermentable_amount(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
i.clear()
i.send_keys('10 lb')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
assert i.get_attribute('value') == '10 lb'
def test_metric_entry(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
i.clear()
i.send_keys('1 kg')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
assert i.get_attribute('value') == '2.204 lb'
def test_change_hop_form(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
s = Select(self.b.find_element_by_css_selector(
'.%s .addition .form select' % step.lower()
))
s.select_by_visible_text('Pellet')
self.blur()
time.sleep(2)
self.b.refresh()
s = self.b.find_element_by_css_selector(
'.%s .addition .form select' % step.lower()
)
assert s.get_attribute('value') == 'PELLET'
def test_change_hop_aa(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil', 'Ferment'):
self.b.find_element_by_link_text(step).click()
label = 'Add Dry Hops...' if step == 'Ferment' else 'Add Hops...'
self.b.find_element_by_link_text(label).click()
self.b.find_element_by_link_text("Simcoe (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .unit input' % step.lower()
)
i.clear()
i.send_keys('12')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .unit input' % step.lower()
)
assert i.get_attribute('value') == '12'
def test_change_hop_boil_time(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_link_text('Add Hops...').click()
self.b.find_element_by_link_text("Simcoe (US)").click()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
Select(selects[1]).select_by_visible_text('45 min')
self.blur()
time.sleep(2)
self.b.refresh()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
assert selects[1].get_attribute('value') == '45'
def test_change_hop_first_wort(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_link_text('Add Hops...').click()
self.b.find_element_by_link_text("Simcoe (US)").click()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
Select(selects[0]).select_by_visible_text('First Wort')
assert not selects[1].is_displayed()
def test_change_hop_flameout(self):
model.Hop(
name="Simcoe",
origin='US',
alpha_acid=13,
description='Sample Description'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Boil').click()
self.b.find_element_by_link_text('Add Hops...').click()
self.b.find_element_by_link_text("Simcoe (US)").click()
selects = self.b.find_elements_by_css_selector(
'.boil .addition .time select'
)
Select(selects[0]).select_by_visible_text('Flame Out')
assert not selects[1].is_displayed()
def test_yeast_step(self):
model.Yeast(
name='Wyeast 1056 - American Ale',
type='ALE',
form='LIQUID',
attenuation=.75,
flocculation='MEDIUM/HIGH'
)
model.commit()
self.b.refresh()
self.b.find_element_by_link_text('Ferment').click()
self.b.find_element_by_link_text('Add Yeast...').click()
self.b.find_element_by_link_text('Wyeast 1056 - American Ale').click()
Select(self.b.find_element_by_css_selector(
'.ferment .addition select'
)).select_by_visible_text('Secondary')
time.sleep(2)
self.b.refresh()
assert self.b.find_element_by_css_selector(
'.ferment .addition select'
).get_attribute('value') == 'SECONDARY'
class TestExtractBuilder(TestSeleniumApp):
def setUp(self):
super(TestExtractBuilder, self).setUp()
self.get("/")
self.b.find_element_by_link_text("Create Your Own Recipe").click()
time.sleep(.1)
self.b.find_element_by_id("name").clear()
self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
Select(
self.b.find_element_by_id("type")
).select_by_visible_text("Extract")
self.b.find_element_by_css_selector("button.ribbon").click()
@property
def b(self):
return self.browser
def test_mash_missing(self):
assert len(
self.b.find_elements_by_css_selector('.step.boil h2 li a')
) == 2
class TestMetricBuilder(TestSeleniumApp):
def setUp(self):
super(TestMetricBuilder, self).setUp()
self.get("/")
self.b.find_element_by_link_text("Create Your Own Recipe").click()
self.b.find_element_by_link_text("Want Metric Units?").click()
time.sleep(.1)
self.b.find_element_by_id("name").clear()
self.b.find_element_by_id("name").send_keys("Rocky Mountain River IPA")
Select(
self.b.find_element_by_id("type")
).select_by_visible_text("All Grain")
self.b.find_element_by_css_selector("button.ribbon").click()
@property
def b(self):
return self.browser
def blur(self):
self.b.find_element_by_css_selector(".logo").click()
def test_defaults(self):
self.wait.until(
lambda driver:
self.b.find_element_by_name("name").get_attribute("value") ==
"Rocky Mountain River IPA"
)
self.assertEqual(
"DraughtCraft - Rocky Mountain River IPA",
self.b.title
)
self.assertEqual(
"20",
self.b.find_element_by_name("volume").get_attribute("value")
)
assert self.b.find_element_by_css_selector('.step.mash') is not None
assert self.b.find_element_by_css_selector('.step.boil') is not None
assert self.b.find_element_by_css_selector('.step.ferment') \
is not None
def test_volume_change_save(self):
self.b.find_element_by_name("volume").clear()
self.b.find_element_by_name("volume").send_keys("10")
self.blur()
time.sleep(2)
self.b.refresh()
self.wait.until(
lambda driver:
self.b.find_element_by_name("volume").get_attribute("value") ==
"10"
)
def test_metric_ingredient_amount(self):
model.Fermentable(
name='2-Row',
type='MALT',
origin='US',
ppg=36,
lovibond=2,
description='Sample Description'
)
model.commit()
self.b.refresh()
for step in ('Mash', 'Boil'):
self.b.find_element_by_link_text(step).click()
self.b.find_element_by_link_text(
"Add Malt/Fermentables..."
).click()
self.b.find_element_by_link_text("2-Row (US)").click()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
i.clear()
i.send_keys('1 kg')
self.blur()
time.sleep(2)
self.b.refresh()
i = self.b.find_element_by_css_selector(
'.%s .addition .amount input' % step.lower()
)
assert i.get_attribute('value') == '1 kg'
def test_fermentation_schedule_change(self):
self.b.find_element_by_link_text('Ferment').click()
self.b.find_element_by_link_text("Add...").click()
self.b.find_element_by_link_text("Add...").click()
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, el in enumerate(days):
Select(el).select_by_visible_text(str(14 + (7 * i)))
for j, el in enumerate(temps):
Select(el).select_by_visible_text(str(20 + (2 * j)))
self.blur()
time.sleep(2)
self.b.refresh()
time.sleep(1)
days = self.b.find_elements_by_css_selector('.process select.days')
temps = self.b.find_elements_by_css_selector(
'.process select.fahrenheit'
)
assert len(days) == 3
assert len(temps) == 3
for i, d in enumerate(days):
assert d.get_attribute('value') == str(14 + (7 * i))
for j, t in enumerate(temps):
assert t.get_attribute('value') == str(20 + (2 * j))
|
bsd-3-clause
| 5,432,450,340,929,008,000
| 30.017497
| 79
| 0.525037
| false
| 3.501899
| true
| false
| false
|
DarkRedman/PyGet
|
pyget.py
|
1
|
6109
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Filename: pydl.py #
# Authors: Brian Tomlinson <darthlukan@gmail.com> #
# Manuel Debaux <debaux.manual@gmail.com> #
# Brian Turner <archkaine@gmail.com> #
# URL: git@github.com:darthlukan/piddle.git #
# Description: A simple CLI download manager written in Python. #
# Warning: If you received this program from any source other than #
# the above noted URL, please check the source code! You may have #
# downloaded a file with malicious code injected. #
# License: GPLv2, Please see the included LICENSE file. #
# Note: This software should be considered experimental! #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Explanation of import list:
# os and sys are needed to make sure that files and system level stuff
# are handled properly. urllib(2) for communications (we are downloading)
# fileinput handles looping over links in a file (txt for now, csv later)
# progressbar adds some bling for the user to look at while we work. To get
# progressbar to work, pip2 install progressbar.
import os
import sys
import urllib
import urllib2
import fileinput
from progressbar import *
#Introduce ourselves
print("""Hello! I am going to ensure that downloading your files, renaming them,
and specifying where to save them, are as simple as possible. Let's get to it!""")
# Warn the user about non-existent feature
print('Be warned! File Looping has been implemented but is experimental.')
print('Downloading large groups of files could lead to RAM abuse.')
# The function that actually gets stuff
def getDownload(urlToGetFile, fileNameToSave): # Grab the file(s)
filelen=0
data=""
retry=False
error=False
try:
data=str(urllib2.urlopen(urlToGetFile).info())
index=data.find("Content-Length")
        assert(index != -1), "Unable to determine the file size"
data=data[index:]
data=data[16:data.find("\r")]
filelen+=int(data)
except Exception as err:
print(err)
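    # Fall back to rough placeholder sizes when the Content-Length could not be
    # determined, so the progress bar below still gets a usable maxval.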
if filelen == 0:
filelen=10.5
if ".flv" in urlToGetFile:
filelen=300000
# Placeholder for progressbar:
widgets = ['Download Progress: ', Percentage(), ' ',
Bar(marker=RotatingMarker(),left='[',right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=filelen)
pbar.start()
try:
webfile=urllib.urlopen(urlToGetFile)
byte = webfile.read(1)
data=byte
cur=0
while byte:
byte = webfile.read(1)
data+=byte
cur+=1
pbar.update(cur)
with open(fileNameToSave,'wb') as f:
f.write(data)
except IOError:
print("%s is an incorrect filename, cannot save the file" % fileNameToSave)
error=True
finally:
pbar.finish()
if error:
if raw_input('Do you want to retry with a new filename ? (y/n): ') == "y":
fileNameToSave=raw_input('Enter the desired path and filename: ')
getDownload(urlToGetFile, fileNameToSave)
# This looks redundant now, but just wait... :)
def getSpecialDownload(urlToGetFile, fileNameToSave):
urllib.urlretrieve(urlToGetFile, fileNameToSave)
# Placeholder for progressbar:
#widgets = ['Overall Progress: ', Percentage(), ' ',
# Bar(marker='#',left='[',right=']'),
# ' ', ETA(), ' ', FileTransferSpeed()]
#pbar = ProgressBar(widgets=widgets, maxval=nl)
#pbar.start()
# The function that sums the lengths of all files to download.
# It avoids downloading the files themselves to get their lengths, but fetching
# the headers can still take noticeable time when there are more than a few files.
def getOverallLength(fileNameUrls):
fi = fileinput.input(fileNameUrls)
overallLength=0
for line in fi:
data=str(urllib2.urlopen(line[:-1]).info())
data=data[data.find("Content-Length"):]
data=data[16:data.find("\r")]
overallLength+=int(data)
return overallLength
def fileLoopCheck():
specialDownload = raw_input('Do you need to import a file with links?(y/n): ')
if specialDownload == 'n':
urlToGetFile = raw_input('Please enter the download URL: ')
fileNameToSave = raw_input('Enter the desired path and filename: ')
getDownload(urlToGetFile,fileNameToSave)
elif specialDownload == 'y':
fileNameUrls = raw_input('Enter the filename (with path) that contains URLs: ')
baseDir = raw_input('Enter the directory where you want the files saved: ')
# Define how to handle pathing, default to preceding '/'
if not baseDir.endswith("/") and baseDir != '':
baseDir+="/"
# Grab the file and iterate over each line, this is not yet smart enough
# to discern between an actual url and erroneous text, so don't have anything
# other than links in your input file!
fi = fileinput.input(fileNameUrls)
        nl = 0  # number of lines
        for line in fi:
            nl += 1  # count this line
# Re-read, this will be cleaned up later
fi = fileinput.input(fileNameUrls) # reset the fileinput : can't reuse it
cl=0 # currentline
# Progressbar() stuff, wheee!
widgets = ['Overall Progress: ', Percentage(), ' ',
Bar(marker='>',left='[',right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
        # Sum the sizes of all files up front so the progress bar gets a
        # proper maxval.
        overallLength = getOverallLength(fileNameUrls)
        pbar = ProgressBar(widgets=widgets, maxval=overallLength)
pbar.start()
# Done with the prep work, time to do what the user wants
for line in fi:
urlToGetFile=line[:-1]
fileNameToSave=baseDir+urlToGetFile[urlToGetFile.rfind('/')+1:]
getSpecialDownload(urlToGetFile, fileNameToSave)
cl+=1
pbar.update(overallLength/nl*cl)
pbar.finish()
print('All done!')
else:
print('There was an error in your response, let\'s try again...')
fileLoopCheck()
# Call start function
fileLoopCheck()
|
gpl-2.0
| 544,684,471,682,824,060
| 38.921569
| 107
| 0.628193
| false
| 3.763401
| false
| false
| false
|
algorhythms/LeetCode
|
934 Shortest Bridge.py
|
1
|
2622
|
#!/usr/bin/python3
"""
In a given 2D binary array A, there are two islands. (An island is a
4-directionally connected group of 1s not connected to any other 1s.)
Now, we may change 0s to 1s so as to connect the two islands together to form 1
island.
Return the smallest number of 0s that must be flipped. (It is guaranteed that
the answer is at least 1.)
Example 1:
Input: [[0,1],[1,0]]
Output: 1
Example 2:
Input: [[0,1,0],[0,0,0],[0,0,1]]
Output: 2
Example 3:
Input: [[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]
Output: 1
Note:
1 <= A.length = A[0].length <= 100
A[i][j] == 0 or A[i][j] == 1
"""
from typing import List
dirs = ((0, -1), (0, 1), (-1, 0), (1, 0))
class Solution:
def shortestBridge(self, A: List[List[int]]) -> int:
"""
market component 1 and component 2
iterate 0 and BFS, min(dist1 + dist2 - 1)?
O(N * N) high complexity
BFS grow from 1 component
"""
m, n = len(A), len(A[0])
# coloring
colors = [[None for _ in range(n)] for _ in range(m)]
color = 0
for i in range(m):
for j in range(n):
if A[i][j] == 1 and colors[i][j] is None:
self.dfs(A, i, j, colors, color)
color += 1
assert color == 2
# BFS
step = 0
q = []
visited = [[False for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
if colors[i][j] == 0:
visited[i][j] = True
q.append((i, j))
while q:
cur_q = []
for i, j in q:
for I, J in self.nbr(A, i, j):
if not visited[I][J]:
if colors[I][J] == None:
visited[I][J] = True # pre-check, dedup
cur_q.append((I, J))
elif colors[I][J] == 1:
return step
step += 1
q = cur_q
raise
def nbr(self, A, i, j):
m, n = len(A), len(A[0])
for di, dj in dirs:
I = i + di
J = j + dj
if 0 <= I < m and 0 <= J < n:
yield I, J
def dfs(self, A, i, j, colors, color):
colors[i][j] = color
for I, J in self.nbr(A, i, j):
if colors[I][J] is None and A[I][J] == 1:
self.dfs(A, I, J, colors, color)
if __name__ == "__main__":
assert Solution().shortestBridge([[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]) == 1
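    # A couple of extra sanity checks taken from Examples 1 and 2 in the
    # docstring above (added as an illustrative sketch, not part of the
    # original test):
    assert Solution().shortestBridge([[0, 1], [1, 0]]) == 1
    assert Solution().shortestBridge([[0, 1, 0], [0, 0, 0], [0, 0, 1]]) == 2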
|
mit
| -2,098,768,741,559,814,000
| 26.3125
| 104
| 0.450801
| false
| 3.045296
| false
| false
| false
|
badele/home-assistant
|
homeassistant/components/zone.py
|
1
|
4147
|
"""
homeassistant.components.zone
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Allows definition of zones in Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zone/
"""
import logging
from homeassistant.const import (
ATTR_HIDDEN, ATTR_ICON, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_NAME)
from homeassistant.helpers import extract_domain_configs, generate_entity_id
from homeassistant.helpers.entity import Entity
from homeassistant.util.location import distance
DOMAIN = "zone"
DEPENDENCIES = []
ENTITY_ID_FORMAT = 'zone.{}'
ENTITY_ID_HOME = ENTITY_ID_FORMAT.format('home')
STATE = 'zoning'
DEFAULT_NAME = 'Unnamed zone'
ATTR_RADIUS = 'radius'
DEFAULT_RADIUS = 100
ICON_HOME = 'mdi:home'
def active_zone(hass, latitude, longitude, radius=0):
""" Find the active zone for given latitude, longitude. """
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (hass.states.get(entity_id) for entity_id
in sorted(hass.states.entity_ids(DOMAIN)))
min_dist = None
closest = None
for zone in zones:
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist
smaller_zone = (zone_dist == min_dist and
zone.attributes[ATTR_RADIUS] <
closest.attributes[ATTR_RADIUS])
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
def in_zone(zone, latitude, longitude, radius=0):
""" Test if given latitude, longitude is in given zone. """
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
return zone_dist - radius < zone.attributes[ATTR_RADIUS]
def setup(hass, config):
""" Setup zone. """
entities = set()
for key in extract_domain_configs(config, DOMAIN):
entries = config[key]
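        # A single zone entry is allowed as well as a list; normalize it below.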
if not isinstance(entries, list):
entries = entries,
for entry in entries:
name = entry.get(CONF_NAME, DEFAULT_NAME)
latitude = entry.get(ATTR_LATITUDE)
longitude = entry.get(ATTR_LONGITUDE)
radius = entry.get(ATTR_RADIUS, DEFAULT_RADIUS)
icon = entry.get(ATTR_ICON)
if None in (latitude, longitude):
logging.getLogger(__name__).error(
'Each zone needs a latitude and longitude.')
continue
zone = Zone(hass, name, latitude, longitude, radius, icon)
zone.entity_id = generate_entity_id(ENTITY_ID_FORMAT, name,
entities)
zone.update_ha_state()
entities.add(zone.entity_id)
if ENTITY_ID_HOME not in entities:
zone = Zone(hass, hass.config.location_name, hass.config.latitude,
hass.config.longitude, DEFAULT_RADIUS, ICON_HOME)
zone.entity_id = ENTITY_ID_HOME
zone.update_ha_state()
return True
class Zone(Entity):
""" Represents a Zone in Home Assistant. """
# pylint: disable=too-many-arguments
def __init__(self, hass, name, latitude, longitude, radius, icon):
self.hass = hass
self._name = name
self.latitude = latitude
self.longitude = longitude
self.radius = radius
self._icon = icon
def should_poll(self):
return False
@property
def name(self):
return self._name
@property
def state(self):
""" The state property really does nothing for a zone. """
return STATE
@property
def icon(self):
return self._icon
@property
def state_attributes(self):
return {
ATTR_HIDDEN: True,
ATTR_LATITUDE: self.latitude,
ATTR_LONGITUDE: self.longitude,
ATTR_RADIUS: self.radius,
}
|
mit
| -537,197,058,929,668,000
| 29.270073
| 79
| 0.613456
| false
| 4.081693
| true
| false
| false
|
tkensiski/rexus
|
Rexus/device_poller/devices/device_loader.py
|
1
|
1483
|
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class DeviceLoader(object):
device_classes = {
# device_type : getattr(module, class_name)
}
def load_device_class(self, device_type, device_class):
logger.info('Loading class for device type: {device_type}'.format(
device_type=device_type.name
))
return self._load_device_class(device_type=device_type, device_class=device_class)
def _load_device_class(self, device_type, device_class):
if device_type in self.device_classes:
logger.debug('Class already loaded for: {device_type}'.format(
device_type=device_type.name
))
return self.device_classes[device_type]
# Build the module name
module_name = 'rexus.devices.{name}'.format(name=device_class.file)
# Import the module
module = __import__(module_name, fromlist=[device_class.klass])
# Get the class reference so we can use it later
loaded_class = getattr(module, device_class.klass)
# Memoize it for later
self._memoize_device_class(device_type=device_type, loaded_class=loaded_class)
# Pass it back so we can use it
return loaded_class
def _memoize_device_class(self, device_type, loaded_class):
if device_type in self.device_classes:
pass
self.device_classes[device_type] = loaded_class
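# Hypothetical usage sketch (the names below are illustrative assumptions, not
# part of this module). A caller holding an enum-like `device_type` (exposing
# `.name`) and a descriptor `device_class` (exposing `.file` and `.klass`)
# would obtain and instantiate the concrete class like this:
#
#   loader = DeviceLoader()
#   cls = loader.load_device_class(device_type=device_type,
#                                  device_class=device_class)
#   device = cls()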
|
gpl-3.0
| 7,591,930,469,421,827,000
| 31.23913
| 90
| 0.641268
| false
| 3.944149
| false
| false
| false
|
t11e/werkzeug
|
werkzeug/serving.py
|
1
|
18996
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
    However there are some caveats. Source code won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
    `KeyboardInterrupt` error. While the latter is easy to solve the first
    one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
    You can also pass it an `extra_files` keyword argument with a list of
    additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import socket
import sys
import time
import thread
import subprocess
from urllib import unquote
from urlparse import urlparse
from itertools import chain
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug.exceptions import InternalServerError
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
path_info, query = urlparse(self.path)[2::2]
url_scheme = self.server.ssl_context is None and 'http' or 'https'
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': unquote(path_info),
'QUERY_STRING': query,
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
return environ
def run_wsgi(self):
app = self.server.app
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
code, msg = status.split(None, 1)
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is str, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
# make sure the headers are sent
if not headers_sent:
write('')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(app)
except (socket.error, socket.timeout), e:
self.connection_dropped(e, environ)
except:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
try:
return BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout), e:
self.connection_dropped(e)
except:
if self.server.ssl_context is None or not is_ssl_error():
raise
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
self.wfile.write("%s %d %s\r\n" %
(self.protocol_version, code, message))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from random import random
from OpenSSL import crypto, SSL
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxint))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = '*'
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 768)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
from OpenSSL import SSL
return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
if ssl_context is not None:
try:
from OpenSSL import tsafe
except ImportError:
raise TypeError('SSL is not available if the OpenSSL '
'library is not installed.')
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = tsafe.Connection(ssl_context, self.socket)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
if self.ssl_context is not None:
con = _SSLConnectionFix(con)
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
def reloader_loop(extra_files=None, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
Copyright notice. This function is based on the autoreload.py from
the CherryPy trac which originated from WSGIKit which is now dead.
:param extra_files: a list of additional files it should watch.
"""
def iter_module_files():
for module in sys.modules.values():
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
mtimes = {}
while 1:
for filename in chain(iter_module_files(), extra_files or ()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
time.sleep(interval)
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with reloader...')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1, threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start an application using wsgiref and with an optional reloader. This
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
WSGI variable and adds optional multithreading and fork support.
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param threaded: should the process handle each request in a separate
thread?
:param processes: number of processes to spawn.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
    :param ssl_context: an SSL context for the connection, 'adhoc' if the server
                        should automatically create one, or `None` to disable
                        SSL (which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.utils import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname or '127.0.0.1'
_log('info', ' * Running on http://%s:%d/', display_hostname, port)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
run_with_reloader(inner, extra_files, reloader_interval)
else:
inner()
|
bsd-3-clause
| -766,306,627,783,075,700
| 36.916168
| 80
| 0.58686
| false
| 4.427972
| false
| false
| false
|
artoonie/transcroobie
|
transcroobie/settings.py
|
1
|
4966
|
"""
Django settings for transcroobie project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
ALLOWED_HOSTS = ['transcroobie.herokuapp.com', 'localhost']
# Application definition
INSTALLED_APPS = [
'transcroobie',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hit',
'hitrequest',
'storages',
'social.apps.django_app.default',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'transcroobie.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'transcroobie.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {'default': dj_database_url.config()}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'PST8PDT'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
STATIC_ROOT = os.path.join(SITE_ROOT, 'static/')
AWS_QUERYSTRING_AUTH = False
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_HOST = os.environ['AWS_S3_HOST']
GS_ACCESS_KEY_ID = os.environ['GS_ACCESS_KEY_ID']
GS_SECRET_ACCESS_KEY = os.environ['GS_SECRET_ACCESS_KEY']
GS_BUCKET_NAME = os.environ['GS_BUCKET_NAME']
DEFAULT_FILE_STORAGE = "storages.backends.gs.GSBotoStorage"
#DEFAULT_FILE_STORAGE = "storages.backends.s3boto.S3BotoStorage"
MEDIA_URL = '/media/'
MEDIA_ROOT = '/tmp/'
IS_DEV_ENV = str(os.environ.get('I_AM_IN_DEV_ENV')) != "0"
USE_AMT_SANDBOX = str(os.environ.get('USE_AMT_SANDBOX')) != "0"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = IS_DEV_ENV
# Celery
BROKER_URL = os.environ.get('REDIS_URL')
CELERY_RESULT_BACKEND = os.environ.get('REDIS_URL')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'US/Pacific'
# Google oauth
AUTHENTICATION_BACKENDS = (
#'social.backends.open_id.OpenIdAuth',
#'social.backends.google.GoogleOpenId',
'social.backends.google.GoogleOAuth2',
#'social.backends.google.GoogleOAuth',
#'social.backends.twitter.TwitterOAuth',
#'social.backends.yahoo.YahooOpenId',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/hitrequest/index.html'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ['SOCIAL_AUTH_GOOGLE_OAUTH2_KEY']
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ['SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET']
SOCIAL_AUTH_GOOGLE_OAUTH2_WHITELISTED_DOMAINS = ['avametric.com'] # for safety, for now
SOCIAL_AUTH_USER_MODEL = 'auth.User'
|
gpl-3.0
| -2,759,039,616,634,398,700
| 28.384615
| 91
| 0.706404
| false
| 3.237288
| false
| false
| false
|
Dwii/Master-Thesis
|
implementation/Python/lbm_2d_3d_example/cylinder3d.py
|
1
|
1909
|
# Copyright (C) 2013 FlowKit Ltd
from numpy import *
from pylb import multi
from pylb import lbio
#def inivelfun(x, y, z, d):
# """ v_x(x,y) = uMax*(1+.2*sin(y/ly*2pi)+.2*sin(z/lz*2pi)). v_y(x,y) = v_y(x,y)= 0 """
# return (d==0) * uLB * (1.0 + 1e-2 * sin(y/ly *2*pi) +
# 1e-2 * sin(z/lz *2*pi))
class InivelFun(object):
def __init__(self, uLB, ly, lz):
self.uLB, self.ly, self.lz = uLB, ly, lz
def __call__(self, x, y, z, d):
""" v_x(x,y) = uMax*(1+.2*sin(y/ly*2pi)+.2*sin(z/lz*2pi)). v_y(x,y) = v_y(x,y)= 0 """
return (d==0) * self.uLB * (1.0 + 1e-2 * sin(y/self.ly *2*pi) +
1e-2 * sin(z/self.lz *2*pi))
def cylinder(nx=160, ny=60, nz=60, Re=220.0, maxIter=10000, plotImages=True):
ly=ny-1.0
lz=nz-1.0
    cx, cy, cz = nx/4, ny/2, nz/2  # Coordinates of the cylinder.
    r=ny/9                         # Radius of the cylinder.
uLB = 0.04 # Velocity in lattice units.
nulb = uLB * r / Re
omega = 1.0 / (3. * nulb + 0.5); # Relaxation parameter.
with multi.GenerateBlock((nx, ny, nz), omega) as block:
block.wall = fromfunction(lambda x, y, z: (x-cx)**2 + (z-cz)**2 < r**2, (nx, ny, nz))
inivelfun = InivelFun(uLB, ly, lz)
inivel = fromfunction(inivelfun, (nx, ny, nz, 3))
block.inipopulations(inivelfun)
block.setboundaryvel(inivelfun)
if plotImages:
plot = lbio.Plot(block.velnorm()[:,ny//2,:])
for time in range(maxIter):
block.collide_and_stream()
if (plotImages and time%10==0):
lbio.writelog(sum(sum(sum(block.wallforce()[:,:,:,0]))))
plot.draw(block.velnorm()[:,:,nz//2])
#print(block.fin[10,10,10,3])
#plot.savefig("vel."+str(time/100).zfill(4)+".png")
if __name__ == "__main__":
cylinder(maxIter=10000, plotImages=True)
|
mit
| 5,910,386,185,579,618,000
| 36.431373
| 94
| 0.510739
| false
| 2.53183
| false
| false
| false
|
SymbiFlow/symbiflow-arch-defs
|
utils/update_arch_timings.py
|
1
|
9248
|
#!/usr/bin/env python3
import lxml.etree as ET
import argparse
from sdf_timing import sdfparse
from sdf_timing.utils import get_scale_seconds
from lib.pb_type import get_pb_type_chain
import re
import os
import sys
# Adds output to stderr to track if timing data for a particular BEL was found
# in bels.json
DEBUG = False
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def mergedicts(source, destination):
"""This function recursively merges two dictionaries:
`source` into `destination"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
mergedicts(value, node)
else:
destination[key] = value
return destination
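# A quick illustration of the merge semantics (values from `source` win on
# conflicts and nested dicts are merged recursively); the example data below
# is made up:
#
#   >>> mergedicts({'a': {'b': 1}}, {'a': {'c': 2}, 'd': 3})
#   {'a': {'c': 2, 'b': 1}, 'd': 3}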
def remove_site_number(site):
"""Some sites are numbered in the VPR arch definitions.
This happens for e.g. SLICE0. This function removes
trailing numbers from the name"""
number = re.search(r'\d+$', site)
if number is not None:
site = site[:-len(str(number.group()))]
return site
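# Example (based on the docstring above): remove_site_number("SLICE0")
# returns "SLICE"; names without a trailing number are returned unchanged.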
def gen_all_possibilities(pattern):
"""
Generates all possible combinations of a pattern if it contains a
wildcard string in braces eg. "LUT[ABCD]" will yield in "LUTA", "LUTB"
and so on.
>>> list(gen_all_possibilities("LUT"))
['LUT']
>>> list(gen_all_possibilities("LUT[ABCD]"))
['LUTA', 'LUTB', 'LUTC', 'LUTD']
"""
# Match the regex
match = re.match(r"(.*)\[([A-Za-z0-9]+)\](.*)", pattern)
# Generate combinations
if match is not None:
for c in match.group(2):
yield match.group(1) + c + match.group(3)
# Not a regex
else:
yield pattern
def get_cell_types_and_instances(bel, location, site, bels):
"""This function searches for a bel type and instance
translation between VPR and Vivado. The translation
is defined in the `bels` dictionary. If translation
is found a list of celltypes and bel instances is returned,
None otherwise"""
if site not in bels:
if DEBUG:
eprint(
"Site '{}' not found among '{}'".format(
site, ", ".join(bels.keys())
)
)
return None
if bel not in bels[site]:
if DEBUG:
eprint(
"Bel '{}' not found among '{}'".format(
bel, ", ".join(bels[site].keys())
)
)
return None
if location not in bels[site][bel]:
if DEBUG:
eprint(
"Location '{}' not found among '{}'".format(
location, ", ".join(bels[site][bel].keys())
)
)
return None
# Generate a list of tuples (celltype, instance)
cells = []
for pattern in bels[site][bel][location]:
for names in gen_all_possibilities(pattern):
cells.append(tuple(names.split(".")))
return cells
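    # A hypothetical illustration (the bels.json layout below is assumed, not
    # taken from an actual mapping file): with
    #   bels = {"SLICE": {"AFF": {"ff": ["FDRE.AFF"]}}}
    # the call get_cell_types_and_instances("AFF", "ff", "SLICE", bels) would
    # return [("FDRE", "AFF")], i.e. one (celltype, instance) pair per pattern.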
def find_timings(timings, bel, location, site, bels, corner, speed_type):
"""This function returns all the timings associated with
the selected `bel` in `location` and `site`. If timings
are not found, empty dict is returned"""
def get_timing(cell, delay, corner, speed_type):
"""
        Gets the timing for a particular corner case. If not found, chooses
        the next best one.
"""
entries = cell[delay]['delay_paths'][corner.lower()]
entry = entries.get(speed_type, None)
if speed_type == 'min':
if entry is None:
entry = entries.get('avg', None)
if entry is None:
entry = entries.get('max', None)
elif speed_type == 'avg':
if entry is None:
entry = entries.get('max', None)
if entry is None:
entry = entries.get('min', None)
elif speed_type == 'max':
if entry is None:
entry = entries.get('avg', None)
if entry is None:
entry = entries.get('min', None)
if entry is None:
# if we failed with desired corner, try the opposite
newcorner = 'FAST' if corner == 'SLOW' else 'SLOW'
entry = get_timing(cell, delay, newcorner, speed_type)
assert entry is not None, (delay, corner, speed_type)
return entry
# Get cells, reverse the list so former timings will be overwritten by
# latter ones.
cells = get_cell_types_and_instances(bel, location, site, bels)
if cells is None:
return None
cells.reverse()
# Gather CELLs
cell = dict()
for ct, inst in cells:
cell = mergedicts(timings['cells'][ct][inst], cell)
# Gather timings
bel_timings = dict()
for delay in cell:
if cell[delay]['is_absolute']:
entry = get_timing(cell, delay, corner.lower(), speed_type)
elif cell[delay]['is_timing_check']:
if cell[delay]['type'] == "setuphold":
# 'setup' and 'hold' are identical
entry = get_timing(cell, delay, 'setup', speed_type)
else:
entry = get_timing(cell, delay, 'nominal', speed_type)
bel_timings[delay] = float(entry) * get_scale_seconds('1 ns')
return bel_timings
def get_bel_timings(element, timings, bels, corner, speed_type):
"""This function returns all the timings for an arch.xml
`element`. It determines the bel location by traversing
the pb_type chain"""
pb_chain = get_pb_type_chain(element)
if len(pb_chain) == 1:
return None
if 'max' in element.attrib and element.attrib['max'].startswith(
'{interconnect'):
bel = 'ROUTING_BEL'
else:
bel = pb_chain[-1]
location = pb_chain[-2]
site = remove_site_number(pb_chain[1])
result = find_timings(
timings, bel, location, site, bels, corner, speed_type
)
if DEBUG:
print(site, bel, location, result is not None, file=sys.stderr)
return result
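    # A hypothetical walk-through: for a pb_type chain such as
    # ['BLK-TL-CLBLL_L', 'SLICEL0', 'FF', 'AFF'] this resolves to
    # site='SLICEL' (trailing digit stripped), location='FF', bel='AFF',
    # unless the element's 'max' attribute starts with '{interconnect', in
    # which case the bel is looked up as 'ROUTING_BEL'.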
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_arch', required=True, help="Input arch.xml file"
)
parser.add_argument('--sdf_dir', required=True, help="SDF files directory")
parser.add_argument(
'--out_arch', required=True, help="Output arch.xml file"
)
parser.add_argument(
'--bels_map',
required=True,
help="VPR <-> timing info bels mapping json file"
)
args = parser.parse_args()
arch_xml = ET.ElementTree()
root_element = arch_xml.parse(args.input_arch)
# read bels json
import json
with open(args.bels_map, 'r') as fp:
bels = json.load(fp)
timings = dict()
files = os.listdir(args.sdf_dir)
for f in files:
if not f.endswith('.sdf'):
continue
with open(args.sdf_dir + '/' + f, 'r') as fp:
try:
tmp = sdfparse.parse(fp.read())
except Exception as ex:
print("{}:".format(args.sdf_dir + '/' + f), file=sys.stderr)
print(repr(ex), file=sys.stderr)
raise
mergedicts(tmp, timings)
if DEBUG:
with open("/tmp/dump.json", 'w') as fp:
json.dump(timings, fp, indent=4)
for dm in root_element.iter('delay_matrix'):
if dm.attrib['type'] == 'max':
bel_timings = get_bel_timings(dm, timings, bels, 'SLOW', 'max')
elif dm.attrib['type'] == 'min':
bel_timings = get_bel_timings(dm, timings, bels, 'FAST', 'min')
else:
assert dm.attrib['type']
if bel_timings is None:
continue
dm.text = dm.text.format(**bel_timings)
for dc in root_element.iter('delay_constant'):
format_s = dc.attrib['max']
max_tim = get_bel_timings(dc, timings, bels, 'SLOW', 'max')
if max_tim is not None:
dc.attrib['max'] = format_s.format(**max_tim)
min_tim = get_bel_timings(dc, timings, bels, 'FAST', 'min')
if min_tim is not None:
dc.attrib['min'] = format_s.format(**min_tim)
for tq in root_element.iter('T_clock_to_Q'):
format_s = tq.attrib['max']
max_tim = get_bel_timings(tq, timings, bels, 'SLOW', 'max')
if max_tim is not None:
tq.attrib['max'] = format_s.format(**max_tim)
min_tim = get_bel_timings(tq, timings, bels, 'FAST', 'min')
if min_tim is not None:
tq.attrib['min'] = format_s.format(**min_tim)
for ts in root_element.iter('T_setup'):
bel_timings = get_bel_timings(ts, timings, bels, 'SLOW', 'max')
if bel_timings is None:
continue
ts.attrib['value'] = ts.attrib['value'].format(**bel_timings)
for th in root_element.iter('T_hold'):
bel_timings = get_bel_timings(th, timings, bels, 'FAST', 'min')
if bel_timings is None:
continue
th.attrib['value'] = th.attrib['value'].format(**bel_timings)
with open(args.out_arch, 'wb') as fp:
fp.write(ET.tostring(arch_xml))
if __name__ == "__main__":
main()
|
isc
| -8,196,329,145,353,724,000
| 30.138047
| 79
| 0.566393
| false
| 3.708099
| false
| false
| false
|
esi-mineset/spark
|
python/pyspark/rdd.py
|
1
|
96405
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
__all__ = ["RDD"]
class PythonEvalType(object):
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
SQL_GROUPED_AGG_PANDAS_UDF = 202
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
    The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
    A bounded value is generated by an approximate job, together with its
    confidence level and its low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(15)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
# The RDD materialization time is unpredicable, if we set a timeout for socket reading
# operation, it will very possibly fail. See SPARK-18281.
sock.settimeout(None)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sock.makefile("rb", 65536))
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doctests, so that they work
    in both Python 2 and 3
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
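    # A small usage sketch (hypothetical numbers): Partitioner(4, portable_hash)
    # maps a key k to portable_hash(k) % 4, so equal keys always land in the
    # same partition, and two Partitioners compare equal only when both the
    # partition count and the partition function match.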
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
        Gets the name of the file to which this RDD was checkpointed.
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
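        # A rough worked example: weights [2, 3] normalize to cumulative bounds
        # [0.0, 0.4, 1.0], producing two RDDRangeSamplers over [0.0, 0.4) and
        # [0.4, 1.0) that share the same seed, so every element lands in exactly
        # one split.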
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
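        # A rough worked example (illustrative numbers only): for num=100 out of
        # total=10000, fraction = 0.01; with replacement the rate becomes
        # 0.01 + 5*sqrt(0.01/10000) = 0.015, and without replacement
        # gamma = -log(5e-5)/10000 ~= 0.00099, giving roughly
        # min(1, 0.01 + 0.00099 + sqrt(0.00099**2 + 2*0.00099*0.01)) ~= 0.0155.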
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
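        # A rough illustration (hypothetical sample): with numPartitions=3 and
        # sorted sample keys ['a', 'b', 'c', 'd', 'e', 'f'], the boundaries are
        # samples[2]='c' and samples[4]='e'; rangePartitioner below bisects each
        # key against these bounds to pick its partition.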
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = unicode(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
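        # The boolean flag marks the dummy zeroValue: empty partitions contribute
        # (None, True), which op simply discards, so only real elements are ever
        # passed to f.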
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
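        # A rough worked example (illustrative numbers): with 64 partially
        # aggregated partitions and depth=3, scale = ceil(64**(1/3)) = 4, so the
        # loop shuffles 64 -> 16 -> 4 partitions before the final reduce(combOp);
        # it stops once another level would no longer help, i.e. when
        # numPartitions <= scale + numPartitions / scale.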
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
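                # A rough worked example (illustrative numbers): asking for num=10
                # after scanning 4 partitions that yielded 3 items gives
                # int(1.5 * 10 * 4 / 3) - 4 = 16 partitions to try next, which is
                # within the cap of partsScanned * 4 = 16.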
left = num - len(items)
            def takeUpToNumLeft(iterator):
                iterator = iter(iterator)
                taken = 0
                while taken < left:
                    # Stop cleanly when the partition runs out; a bare StopIteration
                    # escaping a generator is an error on Python 3.7+ (PEP 479).
                    try:
                        yield next(iterator)
                    except StopIteration:
                        return
                    taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
Default partitioner is hash-partition.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in C{self}, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
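A minimal usage sketch (assumes the doctest-provided SparkContext C{sc}):
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> sorted(rdd.aggregateByKey(0, lambda acc, v: acc + v, lambda a, b: a + b).collect())
[('a', 3), ('b', 1)]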
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# The JVM will raise an exception if the partitions contain different
# numbers of items.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
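A tiny illustrative call (the exact output depends on the physical plan, so the
doctest is skipped):
>>> sc.parallelize([1, 2]).map(str).toDebugString()  # doctest: +SKIP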
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into a Java object via Pyrolite, whether or not
the RDD is serialized in batches.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
| -443,223,749,307,125,300
| 37.48503
| 100
| 0.576277
| false
| 4.034695
| false
| false
| false
|
jawaidss/halalar-web
|
halalar/api/models.py
|
1
|
5956
|
from datetime import datetime, timedelta
from django_countries.fields import CountryField
import hashlib
import mailchimp
import os
import random
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.contrib.sites.models import Site
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator
from django.db import models
MINIMUM_AGE = 18
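# _random_token derives a 40-character hex token: a short random salt is prepended
# to the username and the result is SHA-1 hashed (Python 2 byte-string semantics
# are assumed, matching the rest of this module).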
def _random_token(username):
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
return hashlib.sha1(salt + username).hexdigest()
def _profile_photo_upload_to(instance, filename):
return os.path.join('profiles', 'photos', '%s%sjpg' % (instance.user.username, os.extsep))
class Profile(models.Model):
MALE = 'male'
FEMALE = 'female'
GENDER_CHOICES = (
(MALE, 'Male'),
(FEMALE, 'Female'),
)
user = models.OneToOneField(User)
token = models.CharField(max_length=40, unique=True, editable=False)
photo = models.ImageField(upload_to=_profile_photo_upload_to, null=True, blank=True)
age = models.SmallIntegerField(validators=[MinValueValidator(MINIMUM_AGE)])
gender = models.CharField(max_length=10, choices=GENDER_CHOICES)
city = models.CharField(max_length=100)
country = CountryField(default='US')
religion = models.TextField()
family = models.TextField()
selfx = models.TextField('self')
community = models.TextField()
career = models.TextField()
class Meta:
ordering = ['user']
def __unicode__(self):
return self.user.username
def save(self, **kwargs):
if self.id is None and not self.token and self.user_id is not None:
self.token = _random_token(self.user.username)
super(Profile, self).save(**kwargs)
def serialize(self, include_email=True):
data = {'username': self.user.username,
'photo': self.photo and self.photo.url or None,
'age': self.age,
'gender': self.gender,
'city': self.city,
'country': self.country.code,
'religion': self.religion,
'family': self.family,
'self': self.selfx,
'community': self.community,
'career': self.career}
if include_email:
data['email'] = self.user.email
return data
def send_delayed_welcome_email(self):
site = Site.objects.get_current()
subject = site.name
message = '''Salaam,
I'm Sikander, the creator of %s. Thanks for signing up! I wanted to reach out to see if you needed any help getting started.
Best,
--
Sikander Chowhan
www.%s''' % (site.name, site.domain)
from_email = 'Sikander Chowhan <sikander@%s>' % site.domain
to = [self.user.email]
email = EmailMessage(subject, message, from_email, to)
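# Note: send_at is not a stock Django EmailMessage attribute; it is presumably
# consumed by the configured email backend to delay delivery by one day.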
email.send_at = datetime.now() + timedelta(days=1)
email.send()
def send_signup_notification_email(self):
site = Site.objects.get_current()
subject = self.user.username
message = '''Username: %(username)s
Email: %(email)s
Age: %(age)s
Gender: %(gender)s
City: %(city)s
Country: %(country)s
Religion: %(religion)s
Family: %(family)s
Self: %(self)s
Community: %(community)s
Career: %(career)s
https://%(domain)s%(user_url)s
https://%(domain)s%(profile_url)s''' % {'username': self.user.username,
'email': self.user.email,
'age': self.age,
'gender': self.get_gender_display(),
'city': self.city,
'country': self.country.name,
'religion': self.religion,
'family': self.family,
'self': self.selfx,
'community': self.community,
'career': self.career,
'domain': site.domain,
'user_url': reverse('admin:auth_user_change', args=[self.user.pk]),
'profile_url': reverse('admin:api_profile_change', args=[self.pk])}
from_email = 'sikander@%s' % site.domain
to = [settings.ASANA_EMAIL]
email = EmailMessage(subject, message, from_email, to)
if self.photo:
self.photo.open()
email.attach(os.path.basename(self.photo.url), self.photo.read())
self.photo.close()
email.send()
def subscribe_to_mailchimp_list(self):
m = mailchimp.Mailchimp()
m.lists.subscribe(settings.MAILCHIMP_LIST_ID,
{'email': self.user.email},
double_optin=False,
update_existing=True)
class Message(models.Model):
sender = models.ForeignKey(Profile, related_name='sent')
recipient = models.ForeignKey(Profile, related_name='received')
timestamp = models.DateTimeField(auto_now_add=True)
body = models.TextField()
class Meta:
ordering = ['timestamp']
get_latest_by = 'timestamp'
def __unicode__(self):
return self.body
def serialize(self):
return {'sender': self.sender.user.username,
'recipient': self.recipient.user.username,
'timestamp': naturaltime(self.timestamp),
'body': self.body}
def send_push_notification(self):
message = 'New message from %s' % self.sender.user.username
self.recipient.user.apnsdevice_set.all().send_message(message, badge=1)
self.recipient.user.gcmdevice_set.all().send_message(message)
|
mit
| -1,104,692,743,606,615,800
| 32.846591
| 124
| 0.582942
| false
| 3.946985
| false
| false
| false
|
misko/neon
|
neon/data/image.py
|
2
|
13051
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import ctypes as ct
from multiprocessing import Process, Semaphore
from multiprocessing.sharedctypes import Array
import logging
import numpy as np
import os
from PIL import Image as PILImage
from neon import NervanaObject
from neon.util.persist import load_obj
logger = logging.getLogger(__name__)
class Image(object):
def __init__(self):
raise NotImplementedError()
def save_pbuf(pbuf, imshape, jpgname):
"""
Takes a row-wise pixel buffer, reshapes it into the correct image size,
re-orders the RGB channels, and saves the result out as a JPEG.
This is purely for debugging.
"""
im = PILImage.fromarray(pbuf.reshape(imshape).transpose(1, 2, 0)[:, :, [2, 1, 0]])
im.save(jpgname)
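# Hypothetical debugging call (names and sizes are illustrative): for a flat BGR
# uint8 buffer of length 3*H*W, e.g.
# save_pbuf(np.zeros(3 * 224 * 224, dtype=np.uint8), (3, 224, 224), 'debug.jpg')
# would write a 224x224 (all-black) JPEG to disk.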
class Msg(object):
"""
Data structure encapsulating a message.
"""
def __init__(self, size):
self.s_e = Semaphore(1)
self.s_f = Semaphore(0)
self.s_buf = Array(ct.c_ubyte, size)
def send(self, func):
self.s_e.acquire()
self.s_buf.acquire()
send_result = func(self.s_buf._obj)
self.s_buf.release()
self.s_f.release()
return send_result
def recv(self, func):
self.s_f.acquire()
self.s_buf.acquire()
recv_result = func(self.s_buf._obj)
self.s_buf.release()
self.s_e.release()
return recv_result
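# Illustrative sketch (hypothetical values): the two semaphores force strict
# producer/consumer alternation over the shared byte array, e.g.
# msg = Msg(4)
# msg.send(lambda buf: buf.__setitem__(0, 42)) # producer fills the shared buffer
# msg.recv(lambda buf: buf[0]) # consumer drains it and gets 42 back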
class ImgEndpoint(NervanaObject):
"""
Parent class that sets up all common dataset config options that the client
and server will share
"""
SERVER_KILL = 255
SERVER_RESET = 254
def __init__(self, repo_dir, inner_size,
do_transforms=True, rgb=True, multiview=False,
set_name='train', subset_pct=100):
assert(subset_pct > 0 and subset_pct <= 100), "subset_pct must be between 0 and 100"
assert(set_name in ['train', 'validation'])
self.set_name = set_name if set_name == 'train' else 'val'
self.repo_dir = repo_dir
self.inner_size = inner_size
self.minibatch_size = self.be.bsz
# Load from repo dataset_cache:
try:
cache_filepath = os.path.join(repo_dir, 'dataset_cache.pkl')
dataset_cache = load_obj(cache_filepath)
except IOError:
raise IOError("Cannot find '%s/dataset_cache.pkl'. Run batch_writer to "
"preprocess the data and create batch files for imageset"
% (repo_dir))
# Should have following defined:
req_attributes = ['global_mean', 'nclass', 'val_start', 'ntrain', 'label_names',
'train_nrec', 'img_size', 'nval', 'train_start', 'val_nrec',
'label_dict', 'batch_prefix']
for r in req_attributes:
if r not in dataset_cache:
raise ValueError("Dataset cache missing required attribute %s" % (r))
self.__dict__.update(dataset_cache)
self.filename = os.path.join(repo_dir, self.batch_prefix)
self.center = not do_transforms
self.flip = do_transforms
self.rgb = rgb
self.multiview = multiview
self.label = 'l_id'
if isinstance(self.nclass, dict):
self.nclass = self.nclass[self.label]
# Rough percentage
self.recs_available = getattr(self, self.set_name + '_nrec')
self.macro_start = getattr(self, self.set_name + '_start')
self.macros_available = getattr(self, 'n' + self.set_name)
self.ndata = int(self.recs_available * subset_pct / 100.)
self.start = 0
@property
def nbatches(self):
return -((self.start - self.ndata) // self.be.bsz) # ceildiv
def reset(self):
pass
class ImgMaster(ImgEndpoint):
"""
This is just a client that starts its own server process
"""
def __init__(self, repo_dir, inner_size, do_transforms=True, rgb=True,
multiview=False, set_name='train', subset_pct=100, dtype=np.float32):
super(ImgMaster, self).__init__(repo_dir, inner_size, do_transforms,
rgb, multiview, set_name, subset_pct)
# Create the communication buffers
# We have two response buffers because we are double buffering
npix = self.inner_size * self.inner_size * 3
ishape = (3, self.inner_size, self.inner_size)
origshape = (3, self.img_size, self.img_size)
mbsz = self.be.bsz
self.response = [Msg(npix * mbsz + 4*mbsz) for i in range(2)]
self.request = Msg(1)
self.active_idx = 0
self.jpg_idx = 0
self.server_args = [repo_dir, inner_size, do_transforms, rgb,
multiview, set_name, subset_pct]
self.server_args.append((self.request, self.response))
# For debugging, we can just make a local copy
self.local_img = np.empty((mbsz, npix), dtype=np.uint8)
self.local_lbl = np.empty((mbsz,), dtype=np.int32)
self.dev_X = self.be.iobuf(npix, dtype=dtype)
self.dev_X.lshape = ishape
self.dev_XT = self.be.empty(self.dev_X.shape[::-1], dtype=np.uint8)
self.dev_lbls = self.be.iobuf(1, dtype=np.int32)
self.dev_Y = self.be.iobuf(self.nclass, dtype=dtype)
# Crop the mean according to the inner_size
crop_start = (self.img_size - self.inner_size) / 2
crop_range = slice(crop_start, crop_start + self.inner_size)
if self.global_mean is not None:
self.mean_crop = self.global_mean.reshape(origshape)[:, crop_range, crop_range]
self.dev_mean = self.be.array(self.mean_crop.reshape(npix, 1), dtype=dtype)
else:
self.dev_mean = 127. # Just center uint8 values if missing global mean
def local_copy(bufobj):
self.local_img[:] = np.frombuffer(bufobj, dtype=np.uint8,
count=npix*mbsz).reshape(mbsz, npix)
self.local_lbl[:] = np.frombuffer(bufobj, dtype=np.int32, count=mbsz,
offset=npix*mbsz)
def device_copy(bufobj):
self.dev_XT.set(np.frombuffer(bufobj, dtype=np.uint8,
count=npix*mbsz).reshape(mbsz, npix))
self.dev_lbls.set(np.frombuffer(bufobj, dtype=np.int32, count=mbsz,
offset=npix*mbsz).reshape(1, mbsz))
def jpgview():
outname = 'tmpdir/outv2_' + str(self.jpg_idx) + '_' + str(self.local_lbl[0]) + '.jpg'
save_pbuf(self.local_img[0], ishape, outname)
self.local_copy = local_copy
self.device_copy = device_copy
self.dump_jpg = jpgview
def send_request(self, code):
def set_code(bufobj):
np.frombuffer(bufobj, dtype=np.uint8, count=1)[:] = code
self.request.send(set_code)
def recv_response(self, callback):
"""
callback is a function that will be executed while we have access
to the shared block of memory
we are switching between the response buffers modulo self.active_idx
"""
self.response[self.active_idx].recv(callback)
def init_batch_provider(self):
"""
Launches the server as a separate process and sends an initial request
"""
def server_start_cmd():
d = ImgServer(*self.server_args)
d.run_server()
p = Process(target=server_start_cmd)
p.start()
self.active_idx = 0
self.send_request(self.active_idx)
def exit_batch_provider(self):
"""
Sends kill signal to server
"""
self.send_request(self.SERVER_KILL)
def reset(self):
"""
sends request to restart data from index 0
"""
if self.start == 0:
return
# clear the old request
self.recv_response(self.device_copy)
# Reset server state
self.send_request(self.SERVER_RESET)
# Reset local state
self.start = 0
self.active_idx = 0
self.send_request(self.active_idx)
def next(self):
self.recv_response(self.local_copy)
self.active_idx = 1 if self.active_idx == 0 else 0
self.send_request(self.active_idx)
self.dump_jpg()
def __iter__(self):
for start in range(self.start, self.ndata, self.be.bsz):
end = min(start + self.be.bsz, self.ndata)
if end == self.ndata:
self.start = self.be.bsz - (self.ndata - start)
self.idx = start
self.recv_response(self.device_copy)
self.active_idx = 1 if self.active_idx == 0 else 0
self.send_request(self.active_idx)
# Separating these steps to avoid possible casting error
self.dev_X[:] = self.dev_XT.transpose()
self.dev_X[:] = self.dev_X - self.dev_mean
# Expanding out the labels on device
self.dev_Y[:] = self.be.onehot(self.dev_lbls, axis=0)
yield self.dev_X, self.dev_Y
class ImgServer(ImgEndpoint):
"""
This class interfaces with the clibrary that does the actual decoding
"""
def __init__(self, repo_dir, inner_size, do_transforms=True, rgb=True,
multiview=False, set_name='train', subset_pct=100, shared_objs=None):
super(ImgServer, self).__init__(repo_dir, inner_size, do_transforms,
rgb, multiview, set_name, subset_pct)
assert(shared_objs is not None)
libpath = os.path.dirname(os.path.realpath(__file__))
try:
self._i1klib = ct.cdll.LoadLibrary(os.path.join(libpath,
'imageset_decoder.so'))
except:
logger.error("Unable to load imageset_decoder.so. Ensure that "
"this file has been compiled")
(self.request, self.response) = shared_objs
self.worker = self._i1klib.create_data_worker(ct.c_int(self.img_size),
ct.c_int(self.inner_size),
ct.c_bool(self.center),
ct.c_bool(self.flip),
ct.c_bool(self.rgb),
ct.c_bool(self.multiview),
ct.c_int(self.minibatch_size),
ct.c_char_p(self.filename),
ct.c_int(self.macro_start),
ct.c_uint(self.ndata))
def decode_minibatch(bufobj):
self._i1klib.process_next_minibatch(self.worker, ct.POINTER(ct.c_ubyte)(bufobj))
self.decode_minibatch = decode_minibatch
def recv_request(self):
def read_code(bufobj):
return np.frombuffer(bufobj, dtype=np.uint8, count=1)[0]
return self.request.recv(read_code)
def send_response(self, active_idx):
self.response[active_idx].send(self.decode_minibatch)
def run_server(self):
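# Protocol sketch (inferred from the loop below): request codes 0/1 select which
# double-buffer slot to fill, SERVER_RESET rewinds the C worker, and any other
# code (e.g. SERVER_KILL) terminates the loop.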
while(True):
active_idx = self.recv_request()
if active_idx in (0, 1):
self.send_response(active_idx)
elif active_idx == self.SERVER_RESET:
self._i1klib.reset(self.worker)
else:
print("Server Exiting")
break
if __name__ == "__main__":
from timeit import default_timer
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser
parser = NeonArgparser(__doc__)
args = parser.parse_args()
be = gen_backend(backend='gpu', rng_seed=100)
NervanaObject.be.bsz = 128
master = ImgMaster(repo_dir=args.data_dir, set_name='train', inner_size=224, subset_pct=10)
master.init_batch_provider()
t0 = default_timer()
total_time = 0
for epoch in range(3):
for x, t in master:
print "****", epoch, master.start, master.idx, master.ndata
print t.get().argmax(axis=0)[:17]
master.send_request(master.SERVER_KILL)
|
apache-2.0
| -4,210,209,415,006,743,600
| 36.610951
| 97
| 0.564401
| false
| 3.763264
| false
| false
| false
|
Kungbib/CIPAC
|
webapp/kortkatalogen/liljeson/migrations/0001_initial.py
|
1
|
3484
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-29 12:44
from __future__ import unicode_literals
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Box',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('folder_name', models.CharField(help_text='Filkatalog på disk där denna lådas filer ligger', max_length=255, unique=True, verbose_name='Katalognamn')),
('sequence_number', models.IntegerField(db_index=True)),
('label', models.CharField(db_index=True, max_length=255, verbose_name='Etikett')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'Låda',
'verbose_name_plural': 'Lådor',
'ordering': ['sequence_number'],
'abstract': False,
},
),
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, help_text='Rubriken som visas överst på en kortsida', max_length=255, verbose_name='Kortnamn')),
('filename', models.CharField(db_index=True, help_text='Filnamnet för bildfilen', max_length=255, verbose_name='Filnamn')),
('filename_back', models.CharField(db_index=True, help_text='Filnamnet för bildfilen av baksidan', max_length=255, verbose_name='Filnamn baksida')),
('ocr_text', models.TextField(blank=True, help_text='Automatiskt OCR-tolkad text från kortet.')),
('ocr_text_back', models.TextField(blank=True, help_text='Automatiskt OCR-tolkad text från kortets baksida.')),
('letter', models.CharField(blank=True, db_index=True, help_text='Anges för första kortet för att dela upp katalogen alfabetiskt.', max_length=1, null=True, verbose_name='Indexbokstav')),
('sequence_number', models.IntegerField(db_index=True, verbose_name='Sekvensnummer i låda')),
('catalog_sequence_number', models.IntegerField(blank=True, help_text='Globalt katalognummer som anger kortets plats i katalogen. Används även som identifierare.', null=True, verbose_name='Kortnummer')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('comment', models.TextField(blank=True, help_text='Visas ej för besökare.', null=True, verbose_name='Intern kommentar')),
('search_index', django.contrib.postgres.search.SearchVectorField(null=True)),
('box', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cards', to='liljeson.Box', verbose_name='kort')),
],
),
migrations.AddIndex(
model_name='card',
index=django.contrib.postgres.indexes.GinIndex(fields=['search_index'], name='liljeson_ca_search__9b97bf_gin'),
),
]
|
apache-2.0
| -1,620,285,583,742,835,200
| 57.728814
| 219
| 0.624531
| false
| 3.561151
| false
| false
| false
|
ibm-cds-labs/simple-data-pipe-connector-flightstats
|
pixiedust_flightpredict/pixiedust_flightpredict/vizFeatures.py
|
1
|
2619
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import pixiedust_flightpredict.training as training
from pixiedust.display.chart.renderers.baseChartDisplay import BaseChartDisplay
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pyspark.sql import Row
from functools import reduce
import pixiedust
myLogger = pixiedust.getLogger(__name__)
def makeList(l):
return l if isinstance(l, list) else [l]
class VizualizeFeatures(BaseChartDisplay):
def doRender(self, handlerId):
f1="departureWeather.temp"
f2="arrivalWeather.temp"
f1=f1.split(".")
f2=f2.split(".")
handler=training.getTrainingHandler()
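# Build (classification, (feature1, feature2)) pairs; the reduce() walks nested
# Row attributes so that dotted feature names such as "departureWeather.temp"
# resolve to the inner field.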
darr=self.entity.rdd.map(lambda s: ( handler.computeClassification(s),(\
reduce(lambda x,y: getattr(x,y) if isinstance(x, Row) else getattr(getattr(s,x),y), f1) if len(f1)>1 else getattr(s,f1[0]),\
reduce(lambda x,y: getattr(x,y) if isinstance(x, Row) else getattr(getattr(s,x),y), f2) if len(f2)>1 else getattr(s,f2[0])\
)))\
.reduceByKey(lambda x,y: makeList(x) + makeList(y))\
.collect()
numClasses=handler.numClasses()
citer=iter(cm.rainbow(np.linspace(0, 1, numClasses)))
colors = [next(citer) for i in range(0, numClasses)]
legends= [handler.getClassLabel(i) for i in range(0,numClasses)]
sets=[]
fig, ax = plt.subplots(figsize=(12,8))
for t in darr:
sets.append((ax.scatter([x[0] for x in t[1]],[x[1] for x in t[1]],color=colors[t[0]],alpha=0.5),legends[t[0]]))
ax.set_ylabel("Departure Airport Temp")
ax.set_xlabel("Arrival Airport Temp")
ax.legend([x[0] for x in sets],
[x[1] for x in sets],
scatterpoints=1,
loc='lower left',
ncol=numClasses,
fontsize=12)
def doRenderChart(self):
pass
|
apache-2.0
| 2,142,105,694,167,402,800
| 39.9375
| 136
| 0.612829
| false
| 3.6375
| false
| false
| false
|
Axios-Engineering/audio-components
|
AudioSource/tests/test_AudioSource.py
|
1
|
3739
|
#!/usr/bin/env python
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in AudioSource"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
self.assertEqual(self.spd.get_id(), self.comp.ref._get_identifier())
#######################################################################
# Simulate regular component startup
# Verify that initialize nor configure throw errors
self.comp.initialize()
configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False)
self.comp.configure(configureProps)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../AudioSource.spd.xml") # By default tests all implementations
|
lgpl-3.0
| -6,723,459,644,412,660,000
| 49.527027
| 133
| 0.553624
| false
| 4.685464
| true
| false
| false
|
Ultimaker/Uranium
|
tests/TestRenderBatch.py
|
1
|
3618
|
from unittest.mock import MagicMock, patch
import pytest
from UM.Math.Color import Color
from UM.Math.Matrix import Matrix
from UM.Mesh.MeshBuilder import MeshBuilder
from UM.Mesh.MeshData import MeshData
from UM.View.RenderBatch import RenderBatch
test_addItem_data = [
{"item": {"transformation": Matrix(), "mesh": MeshData()}, "should_add": True},
{"item": {"transformation": None, "mesh": MeshData()}, "should_add": False},
{"item": {"transformation": None, "mesh": None}, "should_add": False},
{"item": {"transformation": Matrix(), "mesh": None}, "should_add": False},
{"item": {"transformation": Matrix(), "mesh": MeshData(), "uniforms": {}}, "should_add": True},
]
test_compare_data = [
{"item1": {}, "item2": {"sort": 1}},
{"item1": {}, "item2": {"sort": 1}},
{"item1": {"type": RenderBatch.RenderType.Solid, "sort": 0}, "item2": {"sort": 20, "type":RenderBatch.RenderType.NoType}}, # Solid trumps notype, even if sort is higher
{"item1": {"type": RenderBatch.RenderType.Transparent, "sort": 0}, "item2": {"sort": 20, "type":RenderBatch.RenderType.NoType}}
]
def test_createRenderBatch():
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch = RenderBatch(mocked_shader)
# Ensure that the proper defaults are set.
assert render_batch.renderType == RenderBatch.RenderType.Solid
assert render_batch.renderMode == RenderBatch.RenderMode.Triangles
assert render_batch.shader == mocked_shader
assert not render_batch.backfaceCull
assert render_batch.renderRange is None
assert render_batch.items == []
@pytest.mark.parametrize("data", test_addItem_data)
def test_addItem(data):
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch = RenderBatch(mocked_shader)
render_batch.addItem(**data["item"])
if data["should_add"]:
assert len(render_batch.items) != 0
@pytest.mark.parametrize("data", test_compare_data)
def test_compare(data):
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch_1 = RenderBatch(mocked_shader, **data["item1"])
render_batch_2 = RenderBatch(mocked_shader, **data["item2"])
assert render_batch_1 < render_batch_2
def test_render():
mocked_shader = MagicMock()
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
render_batch = RenderBatch(mocked_shader)
# Render without a camera shouldn't cause any effect.
render_batch.render(None)
assert mocked_shader.bind.call_count == 0
# Rendering with a camera should cause the shader to be bound and released (even if the batch is empty)
mocked_camera = MagicMock()
mocked_camera.getWorldTransformation = MagicMock(return_value = Matrix())
mocked_camera.getViewProjectionMatrix = MagicMock(return_value=Matrix())
with patch("UM.View.GL.OpenGLContext.OpenGLContext.properties"):
render_batch.render(mocked_camera)
assert mocked_shader.bind.call_count == 1
assert mocked_shader.release.call_count == 1
# Actually render with an item in the batch
mb = MeshBuilder()
mb.addPyramid(10, 10, 10, color=Color(0.0, 1.0, 0.0, 1.0))
mb.calculateNormals()
mesh_data = mb.build()
render_batch.addItem(Matrix(), mesh_data, {})
with patch("UM.View.GL.OpenGL.OpenGL.getInstance"):
with patch("UM.View.GL.OpenGLContext.OpenGLContext.properties"):
render_batch.render(mocked_camera)
assert mocked_shader.bind.call_count == 2
assert mocked_shader.release.call_count == 2
|
lgpl-3.0
| -4,333,633,561,524,502,000
| 37.913978
| 173
| 0.68325
| false
| 3.519455
| true
| false
| false
|
JSBCCA/pythoncode
|
exercises/exercise_10_18_16.py
|
1
|
1586
|
import sys
# open cust_info_login.txt
with open('cust_info_login.txt', 'r') as file:
customer_login = file.read().strip().split('\n')
cust_login = list(map(lambda c: c.split(' _ '), customer_login))
# save usernames and passwords to a list
users_and_passwords = []
for customer in cust_login:
unpw = [customer[2], customer[3]]
users_and_passwords.append(unpw)
# check for username and password
lock = True
while lock is True:
username = input("Please enter your username. Type 'q' to quit. ").strip()
if username.lower() == 'q':
sys.exit()
password = input("Please enter your password. ").strip()
if password.lower() == 'q':
sys.exit()
for user in users_and_passwords:
if username == user[0] and password == user[1]:
lock = False
# ask for new password
lock = True
while lock is True:
new_pass = input(
"What'd you like your password to be? Must be 6 characters. ").strip()
if len(new_pass) == 6:
# get user position in order to change password
for item in cust_login:
if (username in item) and (password in item):
item.remove(password)
item.append(new_pass)
# change password
with open('cust_info_login.txt', 'w') as file:
for i in range(len(cust_login)):
file.write(cust_login[i][0] + ' _ ' + cust_login[i][1] + ' _' +
' ' + cust_login[i][2] + ' _ ' + cust_login[i][3] +
'\n')
print("Password has been changed.")
lock = False
|
mit
| 1,050,089,105,675,520,000
| 37.682927
| 79
| 0.571248
| false
| 3.572072
| false
| false
| false
|
jonge-democraten/jdleden
|
jdleden/afdelingrondschuif.py
|
1
|
4580
|
import logging
from hemres.management.commands.janeus_unsubscribe import Command as CommandUnsub
from hemres.management.commands.janeus_subscribe import Command as CommandSub
from jdleden import ledenlijst
from jdleden import afdelingen
from jdleden import afdelingenoud
logger = logging.getLogger(__name__)
def move_members(members_file, dryrun):
logger.info('BEGIN')
logger.info('file: ' + members_file)
logger.info('dryrun: ' + str(dryrun))
afdelingen_new = afdelingen.AFDELINGEN
afdelingen_oud = afdelingenoud.AFDELINGEN
logger.info("Checking consistency new and old postcode ranges...")
if not check_postcode_indeling(afdelingen_new):
logger.error('postcode check for new departments failed')
raise RuntimeError
if not check_postcode_indeling(afdelingen_oud):
logger.error('postcode check for old departments failed')
raise RuntimeError
logger.info("Reading %s ..." % members_file)
members = ledenlijst.read_xls(members_file)
logger.info("Reading complete")
logger.info("Calculating reallocated members")
reallocated = get_reallocated_members(members)
logger.info("Doing mass (un)subscribes")
for member in reallocated:
lidnummer = member[ledenlijst.LIDNUMMER]
town = member[ledenlijst.WOONPLAATS]
postcode = member[ledenlijst.POSTCODE]
digits = ledenlijst.parse_postcode(postcode)
afdeling_from = find_afdeling(afdelingen_oud, digits)
afdeling_to = find_afdeling(afdelingen_new, digits)
nieuwsbrief_from = "nieuwsbrief-" + afdeling_from.lower()
nieuwsbrief_to = "nieuwsbrief-" + afdeling_to.lower()
logger.info('Move a member living in ' + town + ' from ' + afdeling_from + ' to ' + afdeling_to)
if not dryrun:
CommandUnsub.unsubscribe(lidnummer, nieuwsbrief_from)
CommandSub.subscribe(lidnummer, nieuwsbrief_to)
if dryrun:
logger.warning("Dry-run. No actual database changes!")
logger.info('END')
return reallocated
def get_reallocated_members(members):
reallocated_members = []
for member in members.values():
postcode_string = member[ledenlijst.POSTCODE]
postcode = ledenlijst.parse_postcode(postcode_string)
if not postcode:
continue
if postcode >= 1000 and postcode < 10000:
afdeling_old = find_afdeling(afdelingenoud.AFDELINGEN, postcode)
afdeling_new = find_afdeling(afdelingen.AFDELINGEN, postcode)
if afdeling_new != afdeling_old:
reallocated_members.append(member)
else:
ledenlijst.logger.warning('invalid postcode: ' + str(postcode) + ' for member living in ' + member[ledenlijst.WOONPLAATS])
return reallocated_members
def find_afdeling(afdelingsgrenzen, postcode):
for afdeling, postcodes in afdelingsgrenzen.items():
for postcoderange in postcodes:
if postcode >= postcoderange[0] and postcode <= postcoderange[1]:
return afdeling
return 'Afdeling unknown'
def check_postcode_indeling(afdelingen):
no_overlap = check_overlap_afdelingen(afdelingen)
correct_ranges = check_postcode_ranges(afdelingen)
return no_overlap and correct_ranges
def check_postcode_ranges(afdelingsgrenzen):
correct_ranges = True
for _afdeling, postcodes in afdelingsgrenzen.items():
for postcoderange in postcodes:
if postcoderange[0] > postcoderange[1]:
ledenlijst.logger.error('wrong range, lower bound is higher than upper bound: ' + str(postcoderange))
correct_ranges = False
return correct_ranges
def check_overlap_afdelingen(afdelingsgrenzen):
overlapping_postcodes = []
for i in range(1000, 10000):
counter = 0
afdelingen = []
for afdeling, postcodes in afdelingsgrenzen.items():
for postcoderange in postcodes:
if i >= postcoderange[0] and i <= postcoderange[1]:
counter += 1
afdelingen.append(afdeling)
if counter > 1:
overlapping_postcodes.append(i)
ledenlijst.logger.warning('postcode: ' + str(i) + ' in afdelingen: ' + str(afdelingen))
if counter == 0:
ledenlijst.logger.warning('postcode: ' + str(i) + ' heeft geen afdeling')
if len(overlapping_postcodes) > 0:
ledenlijst.logger.error('overlapping postcodes: ' + str(len(overlapping_postcodes)))
return False
return True
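
if __name__ == '__main__':
    # Minimal illustrative sketch (not part of the original module): the
    # AFDELINGEN mappings are assumed to be dicts of department name ->
    # list of (low, high) postcode ranges, which is what find_afdeling()
    # and the range checks above iterate over. Example values are made up.
    voorbeeld = {'Amsterdam': [(1000, 1099)], 'Rotterdam': [(3000, 3099)]}
    print(find_afdeling(voorbeeld, 1012))    # -> 'Amsterdam'
    print(find_afdeling(voorbeeld, 2500))    # -> 'Afdeling unknown'
    print(check_postcode_ranges(voorbeeld))  # -> True (all lower bounds <= upper bounds)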
|
mit
| 244,635,679,448,980,670
| 39.530973
| 134
| 0.666376
| false
| 3.4462
| false
| false
| false
|
hip-odoo/odoo
|
addons/base_geolocalize/models/res_partner.py
|
5
|
2828
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import urllib2
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
def geo_find(addr):
if not addr:
return None
url = 'https://maps.googleapis.com/maps/api/geocode/json?sensor=false&address='
url += urllib2.quote(addr.encode('utf8'))
try:
result = json.load(urllib2.urlopen(url))
except Exception as e:
raise UserError(_('Cannot contact geolocation servers. Please make sure that your Internet connection is up and running (%s).') % e)
if result['status'] != 'OK':
return None
try:
geo = result['results'][0]['geometry']['location']
return float(geo['lat']), float(geo['lng'])
except (KeyError, ValueError):
return None
def geo_query_address(street=None, zip=None, city=None, state=None, country=None):
if country and ',' in country and (country.endswith(' of') or country.endswith(' of the')):
# put country qualifier in front, otherwise GMap gives wrong results,
# e.g. 'Congo, Democratic Republic of the' => 'Democratic Republic of the Congo'
country = '{1} {0}'.format(*country.split(',', 1))
return tools.ustr(', '.join(filter(None, [street,
("%s %s" % (zip or '', city or '')).strip(),
state,
country])))
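# Illustrative example (not part of the original file, values made up):
#   geo_query_address(street='1 Main St', zip='94105', city='San Francisco',
#                     state='California', country='United States')
# returns '1 Main St, 94105 San Francisco, California, United States', and
# geo_find() would then geocode that string via the Google Maps endpoint above.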
class ResPartner(models.Model):
_inherit = "res.partner"
partner_latitude = fields.Float(string='Geo Latitude', digits=(16, 5))
partner_longitude = fields.Float(string='Geo Longitude', digits=(16, 5))
date_localization = fields.Date(string='Geolocation Date')
@api.multi
def geo_localize(self):
# We need country names in English below
for partner in self.with_context(lang='en_US'):
result = geo_find(geo_query_address(street=partner.street,
zip=partner.zip,
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name))
if result is None:
result = geo_find(geo_query_address(
city=partner.city,
state=partner.state_id.name,
country=partner.country_id.name
))
if result:
partner.write({
'partner_latitude': result[0],
'partner_longitude': result[1],
'date_localization': fields.Date.context_today(partner)
})
return True
|
agpl-3.0
| 6,065,413,060,464,888,000
| 38.830986
| 140
| 0.542079
| false
| 4.41875
| false
| false
| false
|
lisaglendenning/pypetri
|
source/pypetri/graph/graph.py
|
1
|
5541
|
# @copyright
# @license
import collections
import networkx as nx
import pypetri.trellis as trellis
#############################################################################
#############################################################################
class Graph(collections.Mapping, trellis.Component):
CHANGE_ACTIONS = range(3)
ADD_ACTION, REMOVE_ACTION, CLEAR_ACTION = CHANGE_ACTIONS
CHANGE_TYPES = range(2)
NODE_TYPE, EDGE_TYPE = CHANGE_TYPES
Graph = nx.Graph
graph = trellis.attr(None)
changes = trellis.todo(list)
to_change = changes.future
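    # Pending edits are queued on to_change as tuples such as
    # (ADD_ACTION, NODE_TYPE, ('a',), {}) or (ADD_ACTION, EDGE_TYPE, ('a', 'b'), {});
    # regraph() replays them onto the wrapped networkx graph, and apply() records
    # the inverse tuples so trellis can undo them if the rule is rolled back.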
def __init__(self, graph=None, *args, **kwargs):
if graph is None:
graph = self.Graph(*args, **kwargs)
super(Graph, self).__init__(graph=graph)
for k in dir(graph):
if not hasattr(self, k):
setattr(self, k, getattr(graph, k))
def __getitem__(self, key):
return self.graph[key]
def __iter__(self):
return iter(self.graph)
def __len__(self):
return len(self.graph)
@trellis.modifier
def add_node(self, *args, **kwargs):
change = (self.ADD_ACTION, self.NODE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def add_nodes_from(self, nbunch):
for n in nbunch:
self.add_node(n)
@trellis.modifier
def remove_node(self, *args, **kwargs):
change = (self.REMOVE_ACTION, self.NODE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def remove_nodes_from(self, nbunch):
for n in nbunch:
self.remove_node(n)
@trellis.modifier
def add_edge(self, *args, **kwargs):
change = (self.ADD_ACTION, self.EDGE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def add_edges_from(self, ebunch):
for e in ebunch:
self.add_edge(*e)
@trellis.modifier
def remove_edge(self, *args, **kwargs):
change = (self.REMOVE_ACTION, self.EDGE_TYPE, args, kwargs,)
self.to_change.append(change)
@trellis.modifier
def remove_edges_from(self, ebunch):
for e in ebunch:
self.remove_edge(*e)
@trellis.modifier
def add_star(self, nbunch):
self.add_nodes_from(nbunch)
hub = nbunch[0]
for i in xrange(1, len(nbunch)):
self.add_edge(hub, nbunch[i])
@trellis.modifier
def add_path(self, nbunch):
self.add_nodes_from(nbunch)
for i in xrange(len(nbunch)-1):
self.add_edge(nbunch[i],nbunch[i+1])
@trellis.modifier
def add_cycle(self, nbunch):
self.add_path(nbunch)
self.add_edge(nbunch[-1], nbunch[0])
@trellis.modifier
def clear(self):
change = (self.CLEAR_ACTION,)
self.to_change.append(change)
@trellis.maintain
def regraph(self):
graph = self.graph
for change in self.changes:
self.apply(graph, change)
if self.changes:
trellis.mark_dirty()
def apply(self, graph, change, log=True):
undos = []
action = change[0]
if action == self.ADD_ACTION:
type, args, kwargs = change[1:]
if type == self.NODE_TYPE:
if not graph.has_node(args[0]):
undo = (self.REMOVE_ACTION, type, args,)
undos.append(undo)
graph.add_node(*args, **kwargs)
elif type == self.EDGE_TYPE:
if not graph.has_edge(*args[0:2]):
undo = (self.REMOVE_ACTION, type, args,)
undos.append(undo)
graph.add_edge(*args, **kwargs)
elif action == self.REMOVE_ACTION:
type, args, kwargs = change[1:]
if type == self.NODE_TYPE:
u = args[0]
if graph.has_node(u):
edges = graph.edges(u, data=True)
for edge in edges:
undo = (self.ADD_ACTION, self.EDGE_TYPE, edge[:2], edge[2],)
undos.append(undo)
undo = (self.ADD_ACTION, type, (u,), dict(graph.node[u]),)
undos.append(undo)
graph.remove_node(*args, **kwargs)
elif type == self.EDGE_TYPE:
u,v = args[0:2]
if graph.has_edge(u,v):
undo = (self.ADD_ACTION, type, args, dict(graph.edge[u][v]),)
undos.append(undo)
graph.remove_edge(*args, **kwargs)
elif action == self.CLEAR_ACTION:
for n in graph.nodes_iter(data=True):
undo = (self.ADD_ACTION, self.NODE_TYPE, n[:1], n[-1],)
undos.append(undo)
for e in graph.edges_iter(data=True):
undo = (self.ADD_ACTION, self.EDGE_TYPE, e[:2], e[-1],)
undos.append(undo)
graph.clear()
else:
assert False
if log:
trellis.on_undo(self.undo, graph, undos)
def undo(self, graph, changes):
for change in changes:
self.apply(graph, change, False)
def snapshot(self):
return self.graph.copy()
#############################################################################
#############################################################################
|
mit
| 4,138,626,352,126,483,000
| 31.982143
| 84
| 0.495398
| false
| 3.813489
| false
| false
| false
|
mpi-sws-rse/datablox
|
blox/categorize_shard__1_0/b_categorize_shard.py
|
1
|
1171
|
from block import *
from shard import *
from logging import ERROR, WARN, INFO, DEBUG
import time
class categorize_shard(Shard):
@classmethod
def initial_configs(cls, config):
return [config for i in range(config["nodes"])]
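  # e.g. config = {"nodes": 3} yields three identical per-node configs; incoming
  # pushes are then spread round-robin over those nodes by recv_push() below.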
@classmethod
def node_type(self):
return {"name": "Categorize", "input_port": "input", "output_port": "output", "port_type": "PUSH"}
def on_load(self, config):
self.config = config
self.nodes = config["nodes"]
self.max_nodes = 20
self.current_node = 0
self.add_port("input", Port.PUSH, Port.UNNAMED, [])
self.log(INFO, "Categorize shard loaded")
def config_for_new_node(self):
return self.config
def recv_push(self, port, log):
self.log(INFO, "%s sending to port %d" % (self.id, self.current_node))
self.push_node(self.current_node, log)
self.current_node = (self.current_node + 1) % self.nodes
def can_add_node(self):
return (self.nodes < self.max_nodes)
def should_add_node(self, node_num):
self.log(INFO, self.id + " should_add_node got a new node")
self.nodes += 1
# start distribution from the new node
self.current_node = node_num
|
apache-2.0
| 438,109,497,504,606,850
| 29.051282
| 102
| 0.652434
| false
| 3.280112
| true
| false
| false
|
stphivos/django-mock-queries
|
setup.py
|
1
|
1603
|
from setuptools import setup
def read_md(filename):
return open(filename).read()
def parse_requirements(filename):
reqs = []
with open(filename, 'r') as f:
reqs = f.read().splitlines()
if not reqs:
raise RuntimeError("Unable to read requirements from '%s'" % filename)
return reqs
setup(
name='django_mock_queries',
version='2.1.6',
description='A django library for mocking queryset functions in memory for testing',
long_description=read_md('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/stphivos/django-mock-queries',
author='Phivos Stylianides',
author_email='stphivos@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Testing :: Mocking',
'Topic :: Software Development :: Testing :: Unit',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='django orm mocking unit-testing tdd',
packages=['django_mock_queries'],
install_requires=parse_requirements('requirements/core.txt'),
)
|
mit
| 7,877,405,215,237,546,000
| 33.847826
| 88
| 0.631316
| false
| 4.218421
| false
| false
| false
|
jantman/awslimitchecker
|
awslimitchecker/alerts/base.py
|
1
|
5566
|
"""
awslimitchecker/alerts/base.py
The latest version of this package is available at:
<https://github.com/jantman/awslimitchecker>
################################################################################
Copyright 2015-2019 Jason Antman <jason@jasonantman.com>
This file is part of awslimitchecker, also known as awslimitchecker.
awslimitchecker is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
awslimitchecker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with awslimitchecker. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/awslimitchecker> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import logging
from abc import ABCMeta, abstractmethod
logger = logging.getLogger(__name__)
class AlertProvider(object):
__metaclass__ = ABCMeta
def __init__(self, region_name):
"""
Initialize an AlertProvider class. This MUST be overridden by
subclasses. All configuration must be passed as keyword arguments
to the class constructor (these come from ``--alert-config`` CLI
arguments). Any dependency imports must be made in the constructor.
The constructor should do as much as possible to validate configuration.
:param region_name: the name of the region we're connected to
:type region_name: str
"""
self._region_name = region_name
@abstractmethod
def on_success(self, duration=None):
"""
Method called when no thresholds were breached, and run completed
successfully. Should resolve any open incidents (if the service supports
that functionality) or else simply return.
:param duration: duration of the usage/threshold checking run
:type duration: float
"""
raise NotImplementedError()
@abstractmethod
def on_critical(self, problems, problem_str, exc=None, duration=None):
"""
Method called when the run encountered errors, or at least one critical
threshold was met or crossed.
:param problems: dict of service name to nested dict of limit name to
limit, same format as the return value of
:py:meth:`~.AwsLimitChecker.check_thresholds`. ``None`` if ``exc`` is
specified.
:type problems: dict or None
:param problem_str: String representation of ``problems``, as displayed
in ``awslimitchecker`` command line output. ``None`` if ``exc`` is
specified.
:type problem_str: str or None
:param exc: Exception object that was raised during the run (optional)
:type exc: Exception
:param duration: duration of the run
:type duration: float
"""
raise NotImplementedError()
@abstractmethod
def on_warning(self, problems, problem_str, duration=None):
"""
Method called when one or more warning thresholds were crossed, but no
criticals and the run did not encounter any errors.
:param problems: dict of service name to nested dict of limit name to
limit, same format as the return value of
:py:meth:`~.AwsLimitChecker.check_thresholds`.
:type problems: dict or None
:param problem_str: String representation of ``problems``, as displayed
in ``awslimitchecker`` command line output.
:type problem_str: str or None
:param duration: duration of the run
:type duration: float
"""
raise NotImplementedError()
@staticmethod
def providers_by_name():
"""
Return a dict of available AlertProvider subclass names to the class
objects.
:return: AlertProvider class names to classes
:rtype: dict
"""
return {x.__name__: x for x in AlertProvider.__subclasses__()}
@staticmethod
def get_provider_by_name(name):
"""
Get a reference to the provider class with the specified name.
:param name: name of the AlertProvider subclass
:type name: str
:return: AlertProvider subclass
:rtype: ``class``
:raises: RuntimeError
"""
try:
return AlertProvider.providers_by_name()[name]
except KeyError:
raise RuntimeError(
'ERROR: "%s" is not a valid AlertProvider class name' % name
)
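

# Minimal illustrative sketch (not part of awslimitchecker): a log-only provider
# showing the contract described above. Subclassing alone makes it discoverable
# via AlertProvider.providers_by_name(); any extra constructor kwargs would come
# from ``--alert-config`` CLI arguments.
class _ExampleLogOnlyProvider(AlertProvider):

    def on_success(self, duration=None):
        logger.info('run succeeded in %s seconds', duration)

    def on_critical(self, problems, problem_str, exc=None, duration=None):
        logger.error('critical thresholds or errors: %s (exc=%s)', problem_str, exc)

    def on_warning(self, problems, problem_str, duration=None):
        logger.warning('warning thresholds crossed: %s', problem_str)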
|
agpl-3.0
| -9,175,748,815,709,389,000
| 38.197183
| 80
| 0.634567
| false
| 4.852659
| false
| false
| false
|
pkonink/complete-python-bootcamp
|
capstone.py
|
1
|
7045
|
# Capstone project for Jose Portilla's Complete Python Bootcamp course at udemy.com
# Project Idea: Inverted index - An Inverted Index is a data structure used to create full text search.
# Given a set of text files, implement a program to create an inverted index. Also create a
# user interface to do a search using that inverted index which returns a list of files that
# contain the query term / terms. The search index can be in memory.
# Word-level inverted index - Features:
# * loads text file from web into memory, scans it and builds index
# + index stores as {'word':[(str('path/to/file'),int(pos_of__occurrence)),(...)]}
# * combines the dictionary with main database of all scanned text files
# + main dictionary stored locally as a sqlite file
# * UI that allows entry of multiple words (phrases) and return of snippets from relevant text files
# + returns results for both single words and complete phrase (ie, "love", "you", and "love you")
# + UI in cli only, no web or widget
# * Two tables for normalized storage
# + table: CREATE TABLE words(id INTEGER PRIMARY KEY AUTOINCREMENT, word TEXT);
# + table: CREATE TABLE words_loc(id INTEGER PRIMARY KEY AUTOINCREMENT, words_id INTEGER, url TEXT, loc INTEGER);
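# Illustrative sketch of the assumed schema (values made up): the rows
#   words:     (1, 'love')
#   words_loc: (7, 1, 'http://example.com/a.txt', 152)
# mean 'love' occurs at character offset 152 of that page, so a search joins
# words -> words_loc and FileSnip uses the stored offset to cut a display snippet.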
import urllib2
import sqlite3
import re
class FileLoad(object):
def __init__(self,file_loc):
'''loads file, builds index, adds to main index'''
self.return_list = {}
try:
response = urllib2.urlopen(file_loc)
html = response.read()
except:
html = False
print "%s is not a valid URL."%(file_loc)
if html != False:
# progressively remove script, style, then all HTML tags
clean_html = re.sub(r'<script[\s\S]+?>[\s\S]+?<\/script>','',html)
clean_html = re.sub(r'<style[\s\S]+?>[\s\S]+?<\/style>','',clean_html)
clean_html = re.sub(r'<[^<]+?>', '', clean_html)
# remove all special characters except single - and single ' to help build a more clean word list
real_clean_html = re.sub(r'^[\'-]|[\'-]$|[-]{2,}|[\']{2,}|([\'-])\W|\W([\'-])|[^a-z\'\s-]+', ' ', clean_html.lower())
# created ordered list of unique words from file
word_list = sorted(set(real_clean_html.split()))
# now add to sqlite database
try:
conn = sqlite3.connect('capstone.db')
self.cursor = conn.cursor()
# find locations for each word and update database where necessary
for w in word_list:
# We're only interested in words with more than one letter
if len(w) > 1:
# Check if word is already in database; if not, add it
w_id = self.check_for_word(w)
if w_id == False:
self.cursor.execute("insert into words(word) values(?)",(w,))
conn.commit()
w_id = self.cursor.lastrowid
# Get word location in document
for word_loc in [p.start() for p in re.finditer(r'\s%s[\s|-|\.|,]'%(w),clean_html.lower())]:
                            # First, check if this word instance is already in database
                            self.cursor.execute("select id from words_loc where words_id = ? and url = ? and loc = ?",(w_id,file_loc,word_loc))
                            r = self.cursor.fetchone()
                            # If that instance of word isn't recorded already, add to the database
                            if r is None:
self.cursor.execute("insert into words_loc(words_id,url,loc) values(?,?,?)",(w_id,file_loc,word_loc))
conn.commit()
# Close connection and print affirmative message.
conn.close()
print "Index successfully updated for: %s"%(file_loc)
# Print an error if there's a problem with adding to database
except sqlite3.Error, e:
print "Error %s:"%(e.args[0])
def check_for_word(self,word):
'''Checks if a word is already recorded in database'''
self.cursor.execute("select id from words where word = ?",(word,))
result = self.cursor.fetchone()
if result:
return result[0]
else:
return False
class FileSnip(object):
def __init__(self,result):
'''loads file, converts to string, and returns text within n spaces before and
after word_position for display
result = (file,word_position)'''
#for word_loc in [p.start() for p in re.finditer(r'\s%s[\s|-|\.|,]'%(w),clean_html.lower())]:
# print loc,"Excerpt: ...",clean_html[loc-40:loc+40],"...\n"
print result
class SearchScan(object):
def __init__(self,word_list):
'''scans index for occurrences of words in word_list
scans index for phrases; phrase = words in word_list within n pos of each other
results = [(word,file,loc),(...)]'''
print word_list
class SearchOutput(object):
def __init__(self,result_list):
''' combines and displays results to screen word, URL, and file snippet for each result'''
print result_list
class UserInput(object):
def __init__(self):
pass
def user_activity(self):
''' asks user to load file or search for terms and calls pertinent method'''
while True:
task = raw_input('Type "search" or "load" for activity: ').upper()
if task == 'SEARCH':
self.search_query()
break
elif task == 'LOAD':
self.load_file()
break
def load_file(self):
''' takes file location from user and calls FileLoad'''
        file = raw_input("Enter full URL (including http://) of the page to load: ")
# do validation here
FileLoad(file)
def search_query(self):
''' asks for search terms, calls SearchScan, and returns results as SearchOutput'''
search = raw_input("Enter search term: ")
        word_list = search.split()
        results = []
        # SearchScan and FileSnip above are still stubs; this assumes SearchScan
        # will eventually yield (word, file, loc) tuples as its docstring says.
        for item in SearchScan(word_list):
            results.append([item[0], item[1], FileSnip([item[1], item[2]])])
        SearchOutput(results)
def again_or_die(self):
''' asks for another search query or end program'''
while True:
cont = raw_input("Press y to continue or any other key to quit. ").upper()
if cont == "Y":
return True
break
else:
return False
break
class main(object):
def __init__(self):
ui = UserInput()
while True:
#ask for input
ui.user_activity()
#show output
if ui.again_or_die() == False:
print "Goodbye!"
break
main()
|
cc0-1.0
| 1,865,882,465,895,336,000
| 40.686391
| 133
| 0.555713
| false
| 4.13439
| false
| false
| false
|
bouthors/ZenPacks.MatthieuBouthors.pfSense
|
setup.py
|
1
|
2450
|
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.MatthieuBouthors.pfSense"
VERSION = "0.7.0"
AUTHOR = "Matthieu Bouthors"
LICENSE = "GPL v2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.MatthieuBouthors']
PACKAGES = ['ZenPacks', 'ZenPacks.MatthieuBouthors', 'ZenPacks.MatthieuBouthors.pfSense']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ""
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name=NAME,
version=VERSION,
author=AUTHOR,
license=LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers=COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
    # installed, then a ZenPack of this name, if installed, will be upgraded.
prevZenPackName=PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages=NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages=find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data=True,
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
# go poorly if this line is broken into multiple lines or modified to
# dramatically.
install_requires=INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points={
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe=False,
)
|
gpl-2.0
| 913,829,869,681,725,400
| 36.121212
| 89
| 0.707347
| false
| 3.640416
| false
| false
| false
|
ZeromusSoftware/RPi3500
|
big_data/adamant_algorithm/square_meter_price.py
|
1
|
2747
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 26 11:25:06 2016
@author: william
"""
import urllib
import pygeoj
import unicodedata
import pandas as pd
sectors = {"Bouches-du-Rhône":[]}
file13 = pygeoj.load("Data/france-geojson/departements/13/communes.geojson")
for feature in file13:
s = feature.properties['nom']
sectors["Bouches-du-Rhône"].append(s)
communes = sectors["Bouches-du-Rhône"]
def refresh_sqm_price():
prix,evolution = [],[]
for s in communes:
normalized_str = ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
commune = "v_"+normalized_str.lower().replace("'","-").replace(" ","-")+"_13"
if "marseille" in commune:
try :
arrondissement = str(int(commune[12:14]))
except :
arrondissement = "0"+commune[12]
commune = "v_marseille_130"+arrondissement
page=urllib.urlopen('http://www.efficity.com/prix-immobilier-m2/'+commune)
strpage=page.read()
print(commune)
try:
stringevolutiontoseek = '<p class="evol-values">'
indexevol = strpage.index(stringevolutiontoseek)
strevolution = strpage[indexevol+len(stringevolutiontoseek):indexevol+len(stringevolutiontoseek)+4]
floatevolution = float(strevolution.replace(" ",""))
print(floatevolution)
evolution.append(floatevolution)
except :
print("evolution raté..")
evolution.append(0.0)
try:
stringpricetoseek = '<div class="price-per-sqm-width price-per-sqm-values">'
indexprice = strpage.index(stringpricetoseek)
firstcut = strpage[indexprice+len(stringpricetoseek):indexprice+len(stringpricetoseek)+50]
index1 = firstcut.index('<strong>')+len('<strong>')
index2 = firstcut.index('</strong>')+1
strprix = firstcut[index1:index2]
intprix = 0
n = len(strprix)
k = 1
for i in range(n):
try:
if type (int(strprix[n-i-1]))==int:
intprix+=k*int(strprix[n-i-1])
k=k*10
except:
pass
print(intprix)
prix.append(intprix)
        except:
            print("price failed..")
            prix.append(0)
rows = []
for i in range(len(communes)):
rows.append((communes[i],prix[i],evolution[i]))
df = pd.DataFrame(rows,columns = ["Commune","Prix du m2","Evolution sur 3 mois"])
df.to_csv('Data/square_meters_price.csv')
return True
|
gpl-2.0
| -3,089,712,412,697,723,000
| 32.45122
| 113
| 0.549234
| false
| 3.39777
| false
| false
| false
|
Kediel/Violent-Python
|
Chapter 2/bruteKey.py
|
1
|
2401
|
import pexpect
import optparse
import os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value = maxConnections)
Stop = False
Fails = 0
def connect(user, host, keyfile, release):
global Stop, Fails
try:
perm_denied = 'Permission denied'
ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
        opt = ' -o PasswordAuthentication=no'
        connStr = 'ssh ' + user +\
            '@' + host + ' -i ' + keyfile + opt
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied, \
ssh_newkey, conn_closed, '$', '#', ])
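        # ret indexes the expect list above: 0 = timeout, 1 = permission denied,
        # 2 = unknown-host-key prompt, 3 = connection closed, 4/5 = shell prompt ($ or #)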
if ret == 2:
            print '[-] Adding Host to ~/.ssh/known_hosts'
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print '[-] Connection Closed By Remote Host'
Fails += 1
elif ret > 3:
print '[+] Success. ' + str(keyfile)
Stop = True
finally:
if release:
connection_lock.release()
def main():
parser = optparse.OptionParser('usage%prog -H ' +\
'<target host> -u <user> -d <directory>')
parser.add_option('-H', dest = 'tgtHost', type = 'string', \
help = 'specify target host')
parser.add_option('-u', dest = 'user', type = 'string', \
help = 'specify the user')
parser.add_option('-d', dest = 'passDir', type = 'string', \
help = 'specify directory with keys')
(options, args) = parser.parse_args()
host = options.tgtHost
user = options.user
passDir = options.passDir
if host == None or user == None or passDir == None:
print parser.usage
exit(0)
for filename in os.listdir(passDir):
if Stop:
print '[*] Exiting: Key Found.'
exit(0)
if Fails > 5:
print '[!] Exiting: '+\
'Too Many Connections Closed by Remote Host.'
print '[!] Adjust number of simultaneous threads.'
exit(0)
connection_lock.acquire()
fullpath = os.path.join(passDir, filename)
print '[-] Testing keyfile ' + str(fullpath)
t = Thread(target = connect, args = (user, host, fullpath, True))
child = t.start()
if __name__ == '__main__':
main()
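# Example (hypothetical) invocation:
#   python bruteKey.py -H 192.168.1.10 -u root -d ./keys/
# where ./keys/ holds the candidate private key files to try against the target.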
|
mit
| -5,777,584,655,809,432,000
| 25.097826
| 69
| 0.546022
| false
| 3.968595
| false
| false
| false
|
sfcta/TAutils
|
wrangler/TransitParser.py
|
1
|
21183
|
from simpleparse.common import numbers, strings, comments
from simpleparse import generator
from simpleparse.parser import Parser
from simpleparse.dispatchprocessor import *
import re
from .Linki import Linki
from .Logger import WranglerLogger
from .Node import Node
from .PNRLink import PNRLink
from .Supplink import Supplink
from .TransitLine import TransitLine
from .TransitLink import TransitLink
from .ZACLink import ZACLink
__all__ = [ 'TransitParser' ]
WRANGLER_FILE_SUFFICES = [ "lin", "link", "pnr", "zac", "access", "xfer" ]
# PARSER DEFINITION ------------------------------------------------------------------------------
# NOTE: even though XYSPEED and TIMEFAC are node attributes here, I'm not sure that's really ok --
# Cube documentation implies TF and XYSPD are node attributes...
transit_file_def=r'''
transit_file := ( accessli / line / link / pnr / zac / supplink )+, smcw*, whitespace*
line := whitespace?, smcw?, c"LINE", whitespace, lin_attr*, lin_node*, whitespace?
lin_attr := ( lin_attr_name, whitespace?, "=", whitespace?, attr_value, whitespace?,
comma, whitespace?, semicolon_comment* )
lin_nodeattr := ( lin_nodeattr_name, whitespace?, "=", whitespace?, attr_value, whitespace?, comma?, whitespace?, semicolon_comment* )
lin_attr_name := c"allstops" / c"color" / (c"freq",'[',[1-5],']') / c"mode" / c"name" / c"oneway" / c"owner" / c"runtime" / c"timefac" / c"xyspeed" / c"longname"
lin_nodeattr_name := c"access_c" / c"access" / c"delay" / c"xyspeed" / c"timefac"
lin_node := lin_nodestart?, whitespace?, nodenum, spaces*, comma?, spaces*, semicolon_comment?, whitespace?, lin_nodeattr*
lin_nodestart := (whitespace?, "N", whitespace?, "=")
link := whitespace?, smcw?, c"LINK", whitespace, link_attr*, whitespace?, semicolon_comment*
link_attr := (( (link_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_nodes, whitespace?, "=", whitespace?, nodepair) /
(word_modes, whitespace?, "=", whitespace?, numseq) ),
whitespace?, comma?, whitespace?)
link_attr_name := c"dist" / c"speed" / c"time" / c"oneway"
pnr := whitespace?, smcw?, c"PNR", whitespace, pnr_attr*, whitespace?
pnr_attr := (( (pnr_attr_name, whitespace?, "=", whitespace?, attr_value) /
(word_node, whitespace?, "=", whitespace?, ( nodepair / nodenum )) /
(word_zones, whitespace?, "=", whitespace?, numseq )),
whitespace?, comma?, whitespace?, semicolon_comment*)
pnr_attr_name := c"time" / c"maxtime" / c"distfac" / c"cost"
zac := whitespace?, smcw?, c"ZONEACCESS", whitespace, zac_attr*, whitespace?, semicolon_comment*
zac_attr := (( (c"link", whitespace?, "=", whitespace?, nodepair) /
(zac_attr_name, whitespace?, "=", whitespace?, attr_value) ),
whitespace?, comma?, whitespace?)
zac_attr_name := c"mode"
supplink := whitespace?, smcw?, c"SUPPLINK", whitespace, supplink_attr*, whitespace?, semicolon_comment*
supplink_attr := (( (supplink_attr_name, whitespace?, "=", whitespace?, attr_value) /
(c"n", whitespace?, "=", whitespace?, nodepair )),
whitespace?, comma?, whitespace?)
supplink_attr_name:= c"mode" / c"dist" / c"speed" / c"oneway" / c"time"
accessli := whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, accesstag?, spaces?, (float/int)?, spaces?, semicolon_comment?
accesstag := c"wnr" / c"pnr"
word_nodes := c"nodes"
word_node := c"node"
word_modes := c"modes"
word_zones := c"zones"
numseq := int, (spaces?, ("-" / ","), spaces?, int)*
nodepair := nodenum, spaces?, ("-" / ","), spaces?, nodenum
nodenumA := nodenum
nodenumB := nodenum
nodenum := int
attr_value := alphanums / string_single_quote / string_double_quote
alphanums := [a-zA-Z0-9\.]+
<comma> := [,]
<whitespace> := [ \t\r\n]+
<spaces> := [ \t]+
smcw := whitespace?, (semicolon_comment / c_comment, whitespace?)+
'''
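# Illustrative example (not from the original file) of a LINE record that the
# grammar above accepts; attribute names follow lin_attr_name / lin_nodeattr_name:
#
#   ; weekday service
#   LINE NAME="MUNI_1", MODE=11, ONEWAY=T, FREQ[1]=10, N=1234, 1235, 1236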
class TransitFileProcessor(DispatchProcessor):
""" Class to process transit files
"""
def __init__(self, verbosity=1):
self.verbosity=verbosity
self.lines = []
self.links = []
self.pnrs = []
self.zacs = []
self.accesslis = []
self.xferlis = []
self.liType = ''
self.supplinks = []
self.endcomments = []
def crackTags(self, leaf, buffer):
tag = leaf[0]
text = buffer[leaf[1]:leaf[2]]
subtags = leaf[3]
b = []
if subtags:
for leaf in subtags:
b.append(self.crackTags(leaf, buffer))
return (tag,text,b)
def line(self, (tag,start,stop,subtags), buffer):
# this is the whole line
if self.verbosity>=1:
print tag, start, stop
# Append list items for this line
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.lines.append(xxx)
if self.verbosity==2:
# lines are composed of smcw (semicolon-comment / whitespace), line_attr and lin_node
for linepart in subtags:
print " ",linepart[0], " -> [ ",
for partpart in linepart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]],")",
print " ]"
def link(self, (tag,start,stop,subtags), buffer):
# this is the whole link
if self.verbosity>=1:
print tag, start, stop
# Append list items for this link
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.links.append(xxx)
if self.verbosity==2:
# links are composed of smcw and link_attr
for linkpart in subtags:
print " ",linkpart[0], " -> [ ",
for partpart in linkpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
def pnr(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
# Append list items for this link
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.pnrs.append(xxx)
if self.verbosity==2:
# pnrs are composed of smcw and pnr_attr
for pnrpart in subtags:
print " ",pnrpart[0], " -> [ ",
for partpart in pnrpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
def zac(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
if self.verbosity==2:
# zacs are composed of smcw and zac_attr
for zacpart in subtags:
print " ",zacpart[0], " -> [ ",
for partpart in zacpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
# Append list items for this link
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.zacs.append(xxx)
def supplink(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
if self.verbosity==2:
# supplinks are composed of smcw and zac_attr
for supplinkpart in subtags:
print " ",supplinkpart[0], " -> [ ",
for partpart in supplinkpart[3]:
print partpart[0], "(", buffer[partpart[1]:partpart[2]], ")",
print " ]"
# Append list items for this link
# TODO: make the others more like this -- let the list separate the parse structures!
supplink = []
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
supplink.append(xxx)
self.supplinks.append(supplink)
def smcw(self, (tag,start,stop,subtags), buffer):
""" Semicolon comment whitespace
"""
if self.verbosity>=1:
print tag, start, stop
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
self.endcomments.append(xxx)
def accessli(self, (tag,start,stop,subtags), buffer):
if self.verbosity>=1:
print tag, start, stop
for leaf in subtags:
xxx = self.crackTags(leaf,buffer)
if self.liType=="access":
self.accesslis.append(xxx)
elif self.liType=="xfer":
self.xferlis.append(xxx)
else:
raise NetworkException("Found access or xfer link without classification")
class TransitParser(Parser):
def __init__(self, filedef=transit_file_def, verbosity=1):
Parser.__init__(self, filedef)
self.verbosity=verbosity
self.tfp = TransitFileProcessor(self.verbosity)
def buildProcessor(self):
return self.tfp
def convertLineData(self):
""" Convert the parsed tree of data into a usable python list of transit lines
returns list of comments and transit line objects
"""
rows = []
currentRoute = None
for line in self.tfp.lines:
# Each line is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings
if line[0] == 'smcw':
cmt = line[1].strip()
if not cmt==';;<<Trnbuild>>;;':
rows.append(cmt)
continue
# Handle Line attributes
if line[0] == 'lin_attr':
key = None
value = None
comment = None
# Pay attention only to the children of lin_attr elements
kids = line[2]
for child in kids:
if child[0]=='lin_attr_name': key=child[1]
if child[0]=='attr_value': value=child[1]
if child[0]=='semicolon_comment': comment=child[1].strip()
# If this is a NAME attribute, we need to start a new TransitLine!
if key=='NAME':
if currentRoute:
rows.append(currentRoute)
currentRoute = TransitLine(name=value)
else:
currentRoute[key] = value # Just store all other attributes
# And save line comment if there is one
if comment: currentRoute.comment = comment
continue
# Handle Node list
if line[0] == "lin_node":
# Pay attention only to the children of lin_attr elements
kids = line[2]
node = None
for child in kids:
if child[0]=='nodenum':
node = Node(child[1])
if child[0]=='lin_nodeattr':
key = None
value = None
for nodechild in child[2]:
if nodechild[0]=='lin_nodeattr_name': key = nodechild[1]
if nodechild[0]=='attr_value': value = nodechild[1]
if nodechild[0]=='semicolon_comment': comment=nodechild[1].strip()
node[key] = value
if comment: node.comment = comment
currentRoute.n.append(node)
continue
# Got something other than lin_node, lin_attr, or smcw:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (line[0], line[1]))
# End of tree; store final route and return
if currentRoute: rows.append(currentRoute)
return rows
def convertLinkData(self):
""" Convert the parsed tree of data into a usable python list of transit lines
returns list of comments and transit line objects
"""
rows = []
currentLink = None
key = None
value = None
comment = None
for link in self.tfp.links:
# Each link is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings:
if link[0] in ('smcw','semicolon_comment'):
if currentLink:
currentLink.comment = " "+link[1].strip() # Link comment
rows.append(currentLink)
currentLink = None
else:
rows.append(link[1].strip()) # Line comment
continue
# Link records
if link[0] == 'link_attr':
# Pay attention only to the children of lin_attr elements
kids = link[2]
for child in kids:
if child[0] in ('link_attr_name','word_nodes','word_modes'):
key = child[1]
# If this is a NAME attribute, we need to start a new TransitLink.
if key in ('nodes','NODES'):
if currentLink: rows.append(currentLink)
currentLink = TransitLink() # Create new dictionary for this transit support link
if child[0]=='nodepair':
currentLink.setId(child[1])
if child[0] in ('attr_value','numseq'):
currentLink[key] = child[1]
continue
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (link[0], link[1]))
# Save last link too
if currentLink: rows.append(currentLink)
return rows
def convertPNRData(self):
""" Convert the parsed tree of data into a usable python list of PNR objects
returns list of strings and PNR objects
"""
rows = []
currentPNR = None
key = None
value = None
for pnr in self.tfp.pnrs:
# Each pnr is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings
# Textline Comments
if pnr[0] =='smcw':
# Line comment; thus existing PNR must be finished.
if currentPNR:
rows.append(currentPNR)
currentPNR = None
rows.append(pnr[1].strip()) # Append line-comment
continue
# PNR records
if pnr[0] == 'pnr_attr':
# Pay attention only to the children of attr elements
kids = pnr[2]
for child in kids:
if child[0] in ('pnr_attr_name','word_node','word_zones'):
key = child[1]
# If this is a NAME attribute, we need to start a new PNR.
if key in ('node','NODE'):
if currentPNR:
rows.append(currentPNR)
currentPNR = PNRLink() # Create new dictionary for this PNR
if child[0]=='nodepair' or child[0]=='nodenum':
#print "child[0]/[1]",child[0],child[1]
currentPNR.id = child[1]
currentPNR.parseID()
if child[0] in ('attr_value','numseq'):
currentPNR[key.upper()] = child[1]
if child[0]=='semicolon_comment':
currentPNR.comment = ' '+child[1].strip()
continue
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (pnr[0], pnr[1]))
# Save last link too
if currentPNR: rows.append(currentPNR)
return rows
def convertZACData(self):
""" Convert the parsed tree of data into a usable python list of ZAC objects
returns list of strings and ZAC objects
"""
rows = []
currentZAC = None
key = None
value = None
for zac in self.tfp.zacs:
# Each zac is a 3-tuple: key, value, list-of-children.
# Add comments as simple strings
# Textline Comments
if zac[0] in ('smcw','semicolon_comment'):
if currentZAC:
currentZAC.comment = ' '+zac[1].strip()
rows.append(currentZAC)
currentZAC = None
else:
rows.append(zac[1].strip()) # Append value
continue
# Link records
if zac[0] == 'zac_attr':
# Pay attention only to the children of lin_attr elements
kids = zac[2]
for child in kids:
if child[0]=='nodepair':
# Save old ZAC
if currentZAC: rows.append(currentZAC)
# Start new ZAC
currentZAC = ZACLink() # Create new dictionary for this ZAC.
currentZAC.id=child[1]
if child[0] =='zac_attr_name':
key = child[1]
if child[0]=='attr_value':
currentZAC[key] = child[1]
continue
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (zac[0], zac[1]))
# Save last link too
if currentZAC: rows.append(currentZAC)
return rows
def convertLinkiData(self, linktype):
""" Convert the parsed tree of data into a usable python list of ZAC objects
returns list of strings and ZAC objects
"""
rows = []
currentLinki = None
key = None
value = None
linkis = []
if linktype=="access":
linkis=self.tfp.accesslis
elif linktype=="xfer":
linkis=self.tfp.xferlis
else:
raise NetworkException("ConvertLinkiData with invalid linktype")
for accessli in linkis:
# whitespace?, smcw?, nodenumA, spaces?, nodenumB, spaces?, (float/int)?, spaces?, semicolon_comment?
if accessli[0]=='smcw':
rows.append(accessli[1].strip())
elif accessli[0]=='nodenumA':
currentLinki = Linki()
rows.append(currentLinki)
currentLinki.A = accessli[1].strip()
elif accessli[0]=='nodenumB':
currentLinki.B = accessli[1].strip()
elif accessli[0]=='float':
currentLinki.distance = accessli[1].strip()
elif accessli[0]=='int':
currentLinki.xferTime = accessli[1].strip()
elif accessli[0]=='semicolon_comment':
currentLinki.comment = accessli[1].strip()
elif accessli[0]=='accesstag':
currentLinki.accessType = accessli[1].strip()
else:
# Got something unexpected:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (accessli[0], accessli[1]))
return rows
def convertSupplinksData(self):
""" Convert the parsed tree of data into a usable python list of Supplink objects
returns list of strings and Supplink objects
"""
rows = []
currentSupplink = None
key = None
value = None
for supplink in self.tfp.supplinks:
# Supplink records are lists
if currentSupplink: rows.append(currentSupplink)
currentSupplink = Supplink() # Create new dictionary for this PNR
for supplink_attr in supplink:
if supplink_attr[0] == 'supplink_attr':
if supplink_attr[2][0][0]=='supplink_attr_name':
currentSupplink[supplink_attr[2][0][1]] = supplink_attr[2][1][1]
elif supplink_attr[2][0][0]=='nodepair':
currentSupplink.setId(supplink_attr[2][0][1])
else:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink[0], supplink[1]))
raise
elif supplink_attr[0] == "semicolon_comment":
currentSupplink.comment = supplink_attr[1].strip()
elif supplink_attr[0] == 'smcw':
currentSupplink.comment = supplink_attr[1].strip()
else:
WranglerLogger.critical("** SHOULD NOT BE HERE: %s (%s)" % (supplink[0], supplink[1]))
raise
# Save last link too
if currentSupplink: rows.append(currentSupplink)
return rows
|
gpl-3.0
| -808,136,675,958,228,500
| 38.817669
| 165
| 0.514186
| false
| 4.110012
| false
| false
| false
|
summerzhangft/summer
|
article/models.py
|
1
|
1104
|
from django.db import models
from tag.models import Tag
from mistune import markdown
from django.utils import timezone
from django.contrib.auth.models import User
class Article(models.Model):
title = models.CharField(max_length=100)
raw_content = models.TextField(blank=True)
tags = models.ManyToManyField(Tag)
author = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    background = models.CharField(null=True, max_length=500)
    description = models.CharField(max_length=200, null=True)
    vote = models.IntegerField(default=0)
    pub_date = models.DateTimeField(editable=False)
@property
def render_content(self):
return markdown(self.raw_content)
@property
def pub_time_format(self):
return self.pub_date.strftime('%B %d, %Y')
    def save(self, *args, **kwargs):
        if not self.pub_date:
            self.pub_date = timezone.now()
        super(Article, self).save(*args, **kwargs)
def __str__(self):
return self.title
class Meta:
ordering = ('-pub_date',)
# Create your models here.
|
gpl-3.0
| -3,490,386,367,264,015,000
| 29.666667
| 73
| 0.669384
| false
| 3.820069
| false
| false
| false
|
etoki/0726_biginning
|
landingPage/bootstrap/settings.py
|
1
|
3048
|
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8zadu3+@^3*glz12%eyx1v4rbe0f)^0%2l-x923jg!p&7*40%('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bootstrap.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'bootstrap.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
|
mit
| -7,993,397,726,805,836,000
| 25.982301
| 91
| 0.678806
| false
| 3.51963
| false
| false
| false
|
liavkoren/djangoDev
|
tests/generic_views/urls.py
|
1
|
12651
|
from django.conf.urls import url
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models
from . import views
urlpatterns = [
# TemplateView
url(r'^template/no_template/$',
TemplateView.as_view()),
url(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
url(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
url(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
# DetailView
url(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>\d+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>\d+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
url(r'^detail/author/bycustompk/(?P<foo>\d+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
url(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
url(r'^detail/author/(?P<pk>\d+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
url(r'^detail/author/(?P<pk>\d+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
url(r'^detail/author/(?P<pk>\d+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
url(r'^detail/author/(?P<pk>\d+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
url(r'^detail/page/(?P<pk>\d+)/field/$',
views.PageDetail.as_view()),
url(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
url(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
url(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
url(r'^detail/doesnotexist/(?P<pk>\d+)/$',
views.ObjectDoesNotExistDetail.as_view()),
# FormView
url(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
url(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
url(r'^edit/artists/(?P<pk>\d+)/update/$',
views.ArtistUpdate.as_view()),
url(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
url(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
url(r'^edit/authors/create/$',
views.AuthorCreate.as_view()),
url(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>\d+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
url(r'^edit/author/(?P<pk>\d+)/update/$',
views.AuthorUpdate.as_view()),
url(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
url(r'^edit/author/(?P<pk>\d+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>\d+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
url(r'^edit/author/(?P<pk>\d+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted=%(id)s')),
url(r'^edit/author/(?P<pk>\d+)/delete/$',
views.AuthorDelete.as_view()),
url(r'^edit/author/(?P<pk>\d+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
url(r'^dates/books/$',
views.BookArchive.as_view()),
url(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
url(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
url(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
url(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
url(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
url(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
url(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
url(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
# ListView
url(r'^list/dict/$',
views.DictList.as_view()),
url(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
url(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated/(?P<page>\d+)/$',
views.AuthorList.as_view(paginate_by=30)),
url(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
url(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
url(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
url(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
url(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
url(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
url(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
url(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
url(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
url(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
url(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
# YearArchiveView
# Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
url(r'^dates/books/(?P<year>\d{4})/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
url(r'^dates/books/(?P<year>\d{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
url(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
url(r'^dates/booksignings/(?P<year>\d{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/no_month/$',
views.BookMonthArchive.as_view()),
url(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
url(r'^dates/books/(?P<year>\d{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
url(r'^dates/booksignings/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
url(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
url(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
url(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
url(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view(month_format='%m')),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/nopk/$',
views.BookDetail.as_view()),
url(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
url(r'^dates/books/get_object_custom_queryset/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
url(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
url(r'^accounts/login/$', 'django.contrib.auth.views.login')
]
|
bsd-3-clause
| 3,350,932,227,050,310,700
| 47.471264
| 118
| 0.617343
| false
| 3.024384
| false
| false
| false
|
sbergot/python
|
efront/repo.py
|
1
|
2666
|
import logging
import re
import os
from efront import iohelpers as io
DEV_DIR = r"c:\dev4.1"
ROOTS = [
r"c:\svn",
r"c:\git",
]
def get_current_target():
    regex = re.compile(r"<JUNCTION> +dev4\.1 \[(.*)\]")
matches = []
def get_match(line):
m = regex.search(str(line))
if m is None:
return
matches.append(m.group(1))
io.cmd("dir", cwd="c:\\", logger=get_match)
    assert len(matches) == 1, "expected exactly one dev4.1 junction, found: {}".format(matches)
return matches[0]
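# Hedged note on the parsing above: on this setup `dir c:\` is expected to
# print a single junction line roughly of the form
#   01/01/2016  12:00 PM    <JUNCTION>     dev4.1 [c:\svn\some_branch]
# (the timestamp columns and bracketed path are illustrative only); the regex
# captures whatever sits inside the brackets as the current target.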
def remove_junction(junction_path):
io.cmd("rmdir {}".format(junction_path), logger=logging.debug)
def create_junction(dev_dir, srcdir):
logging.info("creating a junction to the repository between {} and {}".format(dev_dir, srcdir))
io.cmd("mklink /J {} {}".format(dev_dir, os.path.abspath(srcdir)), logger=logging.debug)
def switch(srcdir):
if os.path.exists(DEV_DIR):
remove_junction(DEV_DIR)
create_junction(DEV_DIR, srcdir)
if os.path.exists(os.path.join(DEV_DIR, "Switch.cmd")):
logging.info("Running Switch.cmd")
io.cmd("Switch.cmd", cwd=DEV_DIR, logger=logging.getLogger("Switch.cmd").debug)
def find_src_dir(path):
true_dirs = filter(os.path.exists, [os.path.join(root, path) for root in ROOTS] + [os.path.abspath(path)])
true_dirs = list(set(true_dirs))
if len(true_dirs) == 0:
raise Exception("{} not found".format(path))
if len(true_dirs) > 1:
print("\n".join("{} - {}".format(i, p) for i, p in enumerate(true_dirs)))
selection = int(raw_input("please select source: "))
else:
selection = 0
return true_dirs[selection]
class Target:
root_names = list(map(os.path.basename, ROOTS))
root_names.sort()
def __init__(self, name):
self.name = name
self.srcs = set()
def add(self, root):
self.srcs.add(os.path.basename(root))
def _get_src(self, root):
return root if root in self.srcs else " " * len(root)
def __str__(self):
return " ".join([self._get_src(root) for root in self.root_names] + [self.name])
def list_dirs(log):
log("available dirs:")
dirs = {}
for root in ROOTS:
for dirname in os.listdir(root):
if not os.path.exists(os.path.join(root, dirname, "msbuild_RSK.bat")):
continue
if not dirname in dirs:
dirs[dirname] = Target(dirname)
dirs[dirname].add(root)
dirs_list = list(set(dirs))
dirs_list.sort()
for dirname in dirs_list:
log(str(dirs[dirname]))
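# Minimal usage sketch (assumptions: a Windows host with the c:\svn / c:\git
# roots defined above, logging configured by the caller, and a branch name
# that is illustrative only):
#
#   import logging
#   from efront import repo
#
#   logging.basicConfig(level=logging.INFO)
#   repo.list_dirs(logging.info)                   # show candidate working copies
#   repo.switch(repo.find_src_dir("my_branch"))    # repoint the c:\dev4.1 junction
#   logging.info(repo.get_current_target())        # confirm where it now points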
|
bsd-3-clause
| 7,354,850,230,155,652,000
| 29.738095
| 110
| 0.582146
| false
| 3.366162
| false
| false
| false
|
hydrogo/hydropy
|
hydropy/baseflow.py
|
1
|
2682
|
# -*- coding: utf-8 -*-
"""
Hydropy package
@author: Stijn Van Hoey
"""
import numpy as np
import pandas as pd
def get_baseflow_chapman(flowserie, recession_time):
    r"""
Parameters
----------
flowserie : pd.TimeSeries
River discharge flowserie
recession_time : float [0-1]
recession constant
Notes
------
$$Q_b(i) = \frac{k}{2-k}Q_b(i-1) + \frac{1-k}{2-k}Q(i)$$
"""
if not isinstance(flowserie, pd.TimeSeries):
        raise Exception("Not a pd.TimeSeries as input")
secterm = (1.-recession_time)*flowserie/(2.-recession_time)
baseflow = np.empty(flowserie.shape[0])
for i, timestep in enumerate(baseflow):
if i == 0:
baseflow[i] = 0.0
else:
baseflow[i] = recession_time*baseflow[i-1]/(2.-recession_time) + \
secterm.values[i]
return pd.TimeSeries(baseflow, index = flowserie.index)
def get_baseflow_boughton(flowserie, recession_time, baseflow_index):
    r"""
Parameters
----------
flowserie : pd.TimeSeries
River discharge flowserie
recession_time : float [0-1]
recession constant
baseflow_index : float
Notes
------
$$Q_b(i) = \frac{k}{1+C}Q_b(i-1) + \frac{C}{1+C}Q(i)$$
"""
if not isinstance(flowserie, pd.TimeSeries):
        raise Exception("Not a pd.TimeSeries as input")
parC = baseflow_index
secterm = parC*flowserie/(1 + parC)
baseflow = np.empty(flowserie.shape[0])
for i, timestep in enumerate(baseflow):
if i == 0:
baseflow[i] = 0.0
else:
baseflow[i] = recession_time*baseflow[i-1]/(1 + parC) + \
secterm.values[i]
return pd.TimeSeries(baseflow, index = flowserie.index)
def get_baseflow_ihacres(flowserie, recession_time, baseflow_index, alfa):
    r"""
Parameters
----------
flowserie : pd.TimeSeries
River discharge flowserie
recession_time : float [0-1]
recession constant
Notes
------
$$Q_b(i) = \frac{k}{1+C}Q_b(i-1) + \frac{C}{1+C}[Q(i)+\alpha Q(i-1)]$$
$\alpha$ < 0.
"""
if not isinstance(flowserie, pd.TimeSeries):
        raise Exception("Not a pd.TimeSeries as input")
parC = baseflow_index
secterm = parC/(1 + parC)
baseflow = np.empty(flowserie.shape[0])
for i, timestep in enumerate(baseflow):
if i == 0:
baseflow[i] = 0.0
else:
baseflow[i] = recession_time*baseflow[i-1]/(1 + parC) + \
secterm*(flowserie.values[i] + \
alfa*flowserie.values[i-1])
return pd.TimeSeries(baseflow, index = flowserie.index)
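if __name__ == "__main__":
    # Hedged usage sketch on synthetic data (assumes the legacy pandas API used
    # above, where pd.TimeSeries is still available; on recent pandas versions
    # substitute pd.Series and relax the isinstance checks accordingly).
    idx = pd.date_range("2015-01-01", periods=60, freq="D")
    flow = pd.TimeSeries(5.0 + np.abs(np.random.randn(60)).cumsum(), index=idx)
    chapman = get_baseflow_chapman(flow, recession_time=0.95)
    boughton = get_baseflow_boughton(flow, recession_time=0.95, baseflow_index=0.3)
    print(chapman.tail())
    print(boughton.tail())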
|
bsd-2-clause
| -665,022,005,729,298,800
| 25.048544
| 78
| 0.557047
| false
| 3.058153
| false
| false
| false
|
mvaled/sentry
|
src/sentry/south_migrations/0291_merge_legacy_releases.py
|
1
|
100956
|
# -*- coding: utf-8 -*-
import re
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
def is_full_sha(version):
# sha1 or md5
return bool(re.match(r'[a-f0-9]{40}$', version) or re.match(r'[a-f0-9]{32}$', version))
def is_short_sha(version):
# short sha
return bool(re.match(r'[a-f0-9]{7,40}$', version))
def is_semver_like(version):
return bool(re.match(r'([a-z]*)(\-)?v?(?:\d+\.)*\d+', version))
def is_travis_build(version):
# TRAVIS_12345
return bool(re.match(r'(travis)(\_|\-)([a-f0-9]{1,40}$)', version, re.IGNORECASE))
def is_jenkins_build(version):
# jenkins-123-abcdeff
return bool(
re.match(r'(jenkins)(\_|\-)([0-9]{1,40})(\_|\-)([a-f0-9]{5,40}$)', version, re.IGNORECASE)
)
def is_head_tag(version):
# HEAD-abcdefg, master@abcdeff, master(abcdeff)
return bool(
re.match(r'(head|master|qa)(\_|\-|\@|\()([a-f0-9]{6,40})(\)?)$', version, re.IGNORECASE)
)
def is_short_sha_and_date(version):
# abcdefg-2016-03-16
return bool(re.match(r'([a-f0-9]{7,40})-(\d{4})-(\d{2})-(\d{2})', version))
def is_word_and_date(version):
# release-2016-01-01
return bool(re.match(r'([a-z]*)-(\d{4})-(\d{2})-(\d{2})', version))
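# Illustrative version strings the helpers above are meant to accept (taken
# from the inline comments where given, otherwise assumed examples):
#   is_full_sha:           a 40-char sha1 or 32-char md5 hex digest
#   is_short_sha:          "abcdef1"
#   is_semver_like:        "myapp-v1.2.3" or "1.2.3"
#   is_travis_build:       "TRAVIS_12345"
#   is_jenkins_build:      "jenkins-123-abcdeff"
#   is_head_tag:           "HEAD-abcdefg", "master@abcdeff", "master(abcdeff)"
#   is_short_sha_and_date: "abcdefg-2016-03-16"
#   is_word_and_date:      "release-2016-01-01"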
def merge(to_release, from_releases, sentry_models):
# The following models reference release:
# ReleaseCommit.release
# ReleaseEnvironment.release_id
# ReleaseProject.release
# GroupRelease.release_id
# GroupResolution.release
# Group.first_release
# ReleaseFile.release
model_list = (
sentry_models.ReleaseCommit, sentry_models.ReleaseEnvironment, sentry_models.ReleaseFile,
sentry_models.ReleaseProject, sentry_models.GroupRelease, sentry_models.GroupResolution
)
for release in from_releases:
for model in model_list:
if hasattr(model, 'release'):
update_kwargs = {'release': to_release}
else:
update_kwargs = {'release_id': to_release.id}
try:
with transaction.atomic():
model.objects.filter(release_id=release.id).update(**update_kwargs)
except IntegrityError:
for item in model.objects.filter(release_id=release.id):
try:
with transaction.atomic():
model.objects.filter(id=item.id).update(**update_kwargs)
except IntegrityError:
item.delete()
sentry_models.Group.objects.filter(first_release=release).update(first_release=to_release)
release.delete()
def update_version(release, sentry_models):
old_version = release.version
try:
project_slug = release.projects.values_list('slug', flat=True)[0]
except IndexError:
# delete releases if they have no projects
release.delete()
return
new_version = ('%s-%s' % (project_slug, old_version))[:64]
sentry_models.Release.objects.filter(id=release.id).update(version=new_version)
sentry_models.TagValue.objects.filter(
project__in=release.projects.all(), key='sentry:release', value=old_version
).update(value=new_version)
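# Illustrative rename (assumed example): a release "1.0.0" whose first project
# has slug "backend" becomes "backend-1.0.0" (truncated to 64 characters), and
# the matching "sentry:release" tag values are rewritten to the new string.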
class Migration(DataMigration):
def forwards(self, orm):
db.commit_transaction()
dupe_releases = orm.Release.objects.values_list('version', 'organization_id')\
.annotate(vcount=models.Count('id'))\
.filter(vcount__gt=1)
for version, org_id in dupe_releases:
releases = list(
orm.Release.objects.filter(organization_id=org_id, version=version)
.order_by('date_added')
)
releases_with_files = list(
orm.ReleaseFile.objects.filter(
release__in=releases).values_list(
'release_id', flat=True).distinct()
)
# if multiple releases have files, just rename them
# instead of trying to merge
if len(releases_with_files) > 1:
for release in releases:
update_version(release, orm)
continue
if len(releases_with_files) == 1:
from_releases = []
for release in releases:
if release.id == releases_with_files[0]:
to_release = release
else:
from_releases.append(release)
else:
to_release = releases[0]
from_releases = releases[1:]
if is_full_sha(version):
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
affected_projects = set()
for release in releases:
affected_projects.update(
[p for p in release.projects.values_list('slug', flat=True)]
)
has_prod = False
has_staging = False
has_dev = False
for p in affected_projects:
if 'prod' in p:
has_prod = True
elif 'stag' in p or 'stg' in p:
has_staging = True
elif 'dev' in p:
has_dev = True
# assume projects are split by environment if there
# are at least prod/staging or prod/dev, etc
projects_split_by_env = len([x for x in [has_prod, has_dev, has_staging] if x]) >= 2
# compare date_added
date_diff = None
dates = [release.date_added for release in releases]
if dates:
diff = (max(dates) - min(dates)).total_seconds()
if date_diff is None or diff > date_diff:
date_diff = diff
if is_short_sha(version) or \
is_head_tag(version) or \
is_short_sha_and_date(version):
# if projects are across multiple environments, allow 1 week difference
if projects_split_by_env and date_diff and date_diff < 604800:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# +/- 8 hours
if date_diff and date_diff > 28800:
for release in releases:
update_version(release, orm)
else:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
if is_semver_like(version):
# check ref string and urls
refs = {release.ref for release in releases}
urls = {release.url for release in releases}
if (len(refs) == 1 and None not in refs) or (len(urls) == 1 and None not in urls):
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# if projects are across multiple environments, allow 1 week difference
if projects_split_by_env and date_diff and date_diff < 604800:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# +/- 30 mins
if date_diff and date_diff > 1800:
for release in releases:
update_version(release, orm)
else:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
if len(version) >= 20 or is_travis_build(version) or \
is_jenkins_build(version) or \
is_word_and_date(version):
# if projects are across multiple environments, allow 1 week difference
if projects_split_by_env and date_diff and date_diff < 604800:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# +/- 4 hours
if date_diff and date_diff > 14400:
for release in releases:
update_version(release, orm)
else:
merge(to_release=to_release, from_releases=from_releases, sentry_models=orm)
continue
# if we made it this far, assume we should just rename
for release in releases:
update_version(release, orm)
db.start_transaction()
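    # Summary of the dedupe pass above: full-sha versions always merge; short
    # shas / head tags / sha+date strings merge when added within ~8 hours (or
    # within a week when projects are split per environment); semver-like
    # versions also merge on a shared ref/url, otherwise within ~30 minutes;
    # long or CI-style versions merge within ~4 hours; anything left over is
    # renamed to a per-project "<slug>-<version>" by update_version().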
def backwards(self, orm):
"Write your backwards methods here."
pass
models = {
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.apitoken': {
'Meta': {
'object_name': 'ApiToken'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True'
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'token':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_actors'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'actor_key': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.ApiKey']",
'null': 'True',
'blank': 'True'
}
),
'actor_label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authenticator': {
'Meta': {
'unique_together': "(('user', 'type'),)",
'object_name': 'Authenticator',
'db_table': "'auth_authenticator'"
},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_expires': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime(2017, 2, 2, 0, 0)',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'upstream_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
)
},
'sentry.broadcastseen': {
'Meta': {
'unique_together': "(('broadcast', 'user'),)",
'object_name': 'BroadcastSeen'
},
'broadcast': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Broadcast']"
}
),
'date_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.commit': {
'Meta': {
'unique_together': "(('repository_id', 'key'),)",
'object_name': 'Commit',
'index_together': "(('repository_id', 'date_added'),)"
},
'author': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.CommitAuthor']",
'null': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'message': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'repository_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {
'unique_together': "(('organization_id', 'email'),)",
'object_name': 'CommitAuthor'
},
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.commitfilechange': {
'Meta': {
'unique_together': "(('commit', 'filename'),)",
'object_name': 'CommitFileChange'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'filename': ('django.db.models.fields.CharField', [], {
'max_length': '255'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '1'
})
},
'sentry.counter': {
'Meta': {
'object_name': 'Counter',
'db_table': "'sentry_projectcounter'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'unique': 'True'
}
),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {
'object_name': 'DSymBundle'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'sdk': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymSDK']"
}
)
},
'sentry.dsymobject': {
'Meta': {
'object_name': 'DSymObject'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_path': ('django.db.models.fields.TextField', [], {
'db_index': 'True'
}),
'uuid':
('django.db.models.fields.CharField', [], {
'max_length': '36',
'db_index': 'True'
}),
'vmaddr':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'vmsize':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
})
},
'sentry.dsymsdk': {
'Meta': {
'object_name':
'DSymSDK',
'index_together':
"[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"
},
'dsym_type':
('django.db.models.fields.CharField', [], {
'max_length': '20',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'sdk_name': ('django.db.models.fields.CharField', [], {
'max_length': '20'
}),
'version_build': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {
'unique_together': "[('object', 'address')]",
'object_name': 'DSymSymbol'
},
'address':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.DSymObject']"
}
),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {
'unique_together': "(('project_id', 'name'),)",
'object_name': 'Environment'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group_id', 'datetime'),)"
},
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project_id', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {
'unique_together':
"(('event_id', 'key_id', 'value_id'),)",
'object_name':
'EventTag',
'index_together':
"(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {
'unique_together':
"(('project', 'ident'), ('project', 'hash'))",
'object_name':
'EventUser',
'index_together':
"(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'username':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
})
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'legacy_blob'",
'null': 'True',
'to': "orm['sentry.FileBlob']"
}
),
'blobs': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.FileBlob']",
'through': "orm['sentry.FileBlobIndex']",
'symmetrical': 'False'
}
),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.fileblob': {
'Meta': {
'object_name': 'FileBlob'
},
'checksum':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '40'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
)
},
'sentry.fileblobindex': {
'Meta': {
'unique_together': "(('file', 'blob', 'offset'),)",
'object_name': 'FileBlobIndex'
},
'blob': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.FileBlob']"
}
),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {
'object_name': 'GlobalDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '36'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'short_id'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'",
'index_together': "(('project', 'first_release'),)"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']",
'null': 'True',
'on_delete': 'models.PROTECT'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'short_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupemailthread': {
'Meta': {
'unique_together': "(('email', 'group'), ('email', 'msgid'))",
'object_name': 'GroupEmailThread'
},
'date': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'msgid': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'groupemail_set'",
'to': "orm['sentry.Project']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {
'object_name': 'GroupRedirect'
},
'group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'previous_group_id':
('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'unique': 'True'
})
},
'sentry.grouprelease': {
'Meta': {
'unique_together': "(('group_id', 'release_id', 'environment'),)",
'object_name': 'GroupRelease'
},
'environment':
('django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64'
}),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.groupresolution': {
'Meta': {
'object_name': 'GroupResolution'
},
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.groupsnooze': {
'Meta': {
'object_name': 'GroupSnooze'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'unique': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {
'unique_together': "(('group', 'user'),)",
'object_name': 'GroupSubscription'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'subscription_set'",
'to': "orm['sentry.Project']"
}
),
'reason':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('group', 'key', 'value'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'",
'index_together': "(('project', 'key', 'value', 'last_seen'),)"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationavatar': {
'Meta': {
'object_name': 'OrganizationAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.Organization']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'role':
('django.db.models.fields.CharField', [], {
'default': "'member'",
'max_length': '32'
}),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'token': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True',
'blank': 'True'
}
),
'type': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationonboardingtask': {
'Meta': {
'unique_together': "(('organization', 'task'),)",
'object_name': 'OrganizationOnboardingTask'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_completed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.organizationoption': {
'Meta': {
'unique_together': "(('organization', 'key'),)",
'object_name': 'OrganizationOption',
'db_table': "'sentry_organizationoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'first_event': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'forced_color': (
'django.db.models.fields.CharField', [], {
'max_length': '6',
'null': 'True',
'blank': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectbookmark': {
'Meta': {
'unique_together': "(('project_id', 'user'),)",
'object_name': 'ProjectBookmark'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project_id': (
'sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.projectdsymfile': {
'Meta': {
'unique_together': "(('project', 'uuid'),)",
'object_name': 'ProjectDSymFile'
},
'cpu_name': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'uuid': ('django.db.models.fields.CharField', [], {
'max_length': '36'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {
'unique_together': "(('project_id', 'platform'),)",
'object_name': 'ProjectPlatform'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'platform': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project_id', 'version'),)",
'object_name': 'Release'
},
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_released':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'date_started':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'blank': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True',
'blank': 'True'
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'releases'",
'symmetrical': 'False',
'through': "orm['sentry.ReleaseProject']",
'to': "orm['sentry.Project']"
}
),
'ref': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'url': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasecommit': {
'Meta': {
'unique_together': "(('release', 'commit'), ('release', 'order'))",
'object_name': 'ReleaseCommit'
},
'commit': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Commit']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'db_index': 'True'
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseenvironment': {
'Meta': {
'unique_together': "(('project_id', 'release_id', 'environment_id'),)",
'object_name': 'ReleaseEnvironment',
'db_table': "'sentry_environmentrelease'"
},
'environment_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'first_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'project_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'release_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
)
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'project_id':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.releaseproject': {
'Meta': {
'unique_together': "(('project', 'release'),)",
'object_name': 'ReleaseProject',
'db_table': "'sentry_release_project'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'new_groups': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.repository': {
'Meta': {
'unique_together':
"(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))",
'object_name':
'Repository'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'external_id':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization_id': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'db_index': 'True'
}
),
'provider':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'url': ('django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.savedsearch': {
'Meta': {
'unique_together': "(('project', 'name'),)",
'object_name': 'SavedSearch'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_default': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {
'unique_together': "(('project', 'user'),)",
'object_name': 'SavedSearchUserDefault',
'db_table': "'sentry_savedsearch_userdefault'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'savedsearch': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.SavedSearch']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_password_expired':
('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'name': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'db_column': "'first_name'",
'blank': 'True'
}
),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'session_nonce':
('django.db.models.fields.CharField', [], {
'max_length': '12',
'null': 'True'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useravatar': {
'Meta': {
'object_name': 'UserAvatar'
},
'avatar_type':
('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
}),
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']",
'unique': 'True',
'null': 'True',
'on_delete': 'models.SET_NULL'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': (
'django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'avatar'",
'unique': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.useremail': {
'Meta': {
'unique_together': "(('user', 'email'),)",
'object_name': 'UserEmail'
},
'date_hash_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_verified': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'emails'",
'to': "orm['sentry.User']"
}
),
'validation_hash': (
'django.db.models.fields.CharField', [], {
'default': "u'eoUxyDO82qJrLEXmZNPgefpGSvdT4CsY'",
'max_length': '32'
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'UserReport',
'index_together': "(('project', 'event_id'), ('project', 'date_added'))"
},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
}
}
complete_apps = ['sentry']
symmetrical = True
|
bsd-3-clause
| 510,566,170,111,118,700
| 35.953148
| 98
| 0.40688
| false
| 4.680606
| false
| false
| false
|
hacchy/MetaVelvet
|
scripts/scriptEstimatedCovMulti.py
|
1
|
7954
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Evaluate peaks of coverage from a kmer coverage file (e.g.
Graph2-stats.txt)'''
import sys
import math
# Define functions
def num(s):
    '''Take a string representing a number and convert it. Return
    a float or int as appropriate. An exception is raised if the
    string does not represent a number.'''
try:
return int(s)
except ValueError:
return float(s)
def importStats(fin_stats):
dicStats = {}
listHeader = []
while True:
line = fin_stats.readline()
# Exit after last line
if not line:
break
# Skip empty line
line = line.rstrip()
if not line:
continue
lineFields = line.split("\t")
if len(dicStats) == 0:
# Process header line
listHeader = lineFields
for header in listHeader:
dicStats[header] = []
else:
# Process line containing coverage values
listStats = lineFields
for i in range(len(lineFields)):
stats = num(listStats[i])
dicStats[listHeader[i]].append(stats)
return dicStats
def weightedHisto(dicStats, xMin, xMax, binWidth):
dicHisto = {}
listShort1Cov = dicStats["short1_cov"]
listLgth = dicStats["lgth"]
for x in range(xMin, xMax, binWidth):
dicHisto[x] = 0
for i in range(len(listShort1Cov)):
cov = listShort1Cov[i]
if cov < xMin or cov >= xMax:
continue
for x in range(xMin, xMax+binWidth, binWidth):
if (cov >= x and cov < x + binWidth):
dicHisto[x] += listLgth[i]
return dicHisto
def smoothingHisto(dicHisto, xMin, xMax, binWidth, widthMovAve):
dicSmoothHisto = {}
listMovAve = []
for x in range(xMin, xMax, binWidth):
listMovAve.append(dicHisto[x])
if len(listMovAve) < widthMovAve:
continue
dicSmoothHisto[x - binWidth * ((widthMovAve - 1) / 2)] \
= sum(listMovAve) / float(widthMovAve)
listMovAve.pop(0)
return dicSmoothHisto
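# Editor's note: smoothingHisto() writes each moving average back at the centre of
# its widthMovAve-wide window, which is why main() later shrinks the histogram range
# by binWidth * ((widthMovAve - 1) / 2) on both sides before running peak detection.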
def printHisto(dicHisto, xMin, xMax, binWidth):
print "Histogram :"
for x in range(xMin, xMax, binWidth):
#print str(x) + " : " + str(int(round(dicHisto[x], 0)))
lenBar = int(round((dicHisto[x] / 20000), 0)) - 1
print str(x) + "\t",
for i in range(lenBar):
print "=",
print "\n",
print "\n",
def setXMax(xMax, binWidth):
return int((math.floor(xMax / binWidth)) * binWidth)
def getFirstXMax(dicStats, binWidth, thresConLen):
listLgth = dicStats["lgth"]
listShort1Cov = dicStats["short1_cov"]
maxCov = 0
subMaxCov = 0
for i in range(len(listLgth)):
if listLgth[i] >= thresConLen:
if listShort1Cov[i] > maxCov:
subMaxCov = maxCov
maxCov = listShort1Cov[i]
xMax = setXMax(subMaxCov, binWidth) + binWidth * 5
return xMax
def getN50(tupleConLen):
listSortedConLen = list(tupleConLen)
listSortedConLen.sort()
listSortedConLen.reverse()
totalLen = sum(listSortedConLen)
sumLen = 0
for i in range(len(listSortedConLen)):
sumLen += listSortedConLen[i]
if sumLen >= totalLen / 2:
return listSortedConLen[i]
return -1
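# Worked example (editor's addition, hypothetical numbers): for contig lengths
# (500, 300, 200, 100, 100) the total is 1200, and the running sum over the
# descending-sorted lengths first reaches 600 at 500 + 300, so getN50 returns 300.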
def setWidthByXMax(xMax):
listWidth = [0, 0] # [binWidth, widthMovAve]
if xMax > 300:
listWidth = [6, 5]
if xMax <= 300:
listWidth = [4, 3]
if xMax <= 120:
listWidth = [2, 3]
if xMax <= 100:
listWidth = [1, 1]
return listWidth
def detectPeakPandS(dicHisto, xMin, xMax, binWidth, thresHeight,
listPeakPandS):
countIncrease = 0; thresIncrease = 3
countDecrease = 0; thresDecrease = 3
beforeHeight = -1
flagPeakStart = False
peakHeight = 0; peakCov = 0
for x in range(xMax - binWidth, xMin - binWidth, -1 * binWidth):
if beforeHeight == -1:
beforeHeight = dicHisto[x]
continue
if not flagPeakStart:
if dicHisto[x] >= thresHeight:
if dicHisto[x] >= beforeHeight:
countIncrease += 1
if countIncrease >= thresIncrease:
countIncrease = 0
flagPeakStart = True
beforeHeight = dicHisto[x]
if flagPeakStart:
if dicHisto[x] >= peakHeight:
peakHeight = dicHisto[x]
peakCov = x
else:
countDecrease += 1
if countDecrease >= thresDecrease:
for i in range(2):
if listPeakPandS[i] == -1:
tmpBias = float(binWidth) / 2
listPeakPandS[i] = peakCov + tmpBias
peakHeight = 0; peakCov = 0
break
if listPeakPandS[1] != -1:
return listPeakPandS
countDecrease = 0
flagPeakStart = False
return listPeakPandS
def printPeaks(listPeak):
print "Peaks :"
print listPeak
strList = []
for value in listPeak:
strList.append(str(value))
print '_'.join(strList)
def check_args():
'''Check that an argument was provided or complain and exit.
Return the name of the file to use'''
if len(sys.argv) != 2:
        script_name = sys.argv[0]
        print 'Usage: %s <Graph2_stats_file>' % script_name
sys.exit(1)
return sys.argv[1]
def main(stats_file):
# Import stats file
fin_stats = open(stats_file, "r")
dicStats = importStats(fin_stats)
# Make weighted histogram
listPeak = []
xMin = 0
xMax = 1000
binWidth = 4
widthMovAve = 5
listPeakPandS = [-1, -1]
N50 = 0
thresHeight = 0
thresConLen = 0
while True:
# Get N50
if len(listPeak) == 0:
N50 = getN50(tuple(dicStats["lgth"]))
print "N50 : " + str(N50)
thresConLen = N50 * 5
# Get first xMax
if len(listPeak) == 0:
xMax = getFirstXMax(dicStats, binWidth, thresConLen)
print "First xMax : " + str(xMax)
# Set width and xMax
listWidth = setWidthByXMax(xMax)
binWidth = listWidth[0]; widthMovAve = listWidth[1]
xMax = setXMax(xMax, binWidth)
# Make weighted and smoothed histogram
xMin = 0
dicHisto = weightedHisto(dicStats, xMin, xMax, binWidth)
dicSmoothHisto = smoothingHisto(dicHisto, xMin, xMax,
binWidth, widthMovAve)
xMin += binWidth * ((widthMovAve - 1) / 2)
xMax -= binWidth * ((widthMovAve - 1) / 2)
# Get thresHeight
if len(listPeak) == 0:
thresHeight = dicSmoothHisto[xMax - binWidth]
print "Thres Height : " + str(thresHeight)
# Print histogram
if len(listPeak) == 0:
printHisto(dicSmoothHisto, xMin, xMax, binWidth)
# Detect (primary and) secondary peak
listPeakPandS = detectPeakPandS(dicSmoothHisto, xMin, xMax, binWidth,
thresHeight, listPeakPandS)
# Record peak
if len(listPeak) == 0:
listPeak.append(listPeakPandS[0])
listPeak.append(listPeakPandS[1])
# When couldn't detect secondary peak, break
if listPeakPandS[1] == -1:
listPeak.pop(-1)
printPeaks(listPeak)
break
# Prepare for next peak
listPeakPandS[0] = listPeakPandS[1]
listPeakPandS[1] = -1
xMax = listPeakPandS[0]
if __name__ == "__main__":
stats_file = check_args()
main(stats_file)
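# Editor's note (hypothetical sample, not part of the original script): importStats()
# expects a tab-separated table whose first row names the columns; only the "lgth"
# and "short1_cov" columns are used by the peak detection above, e.g.
#
#   ID    lgth    short1_cov
#   1     1540    28.3
#   2     310     12.7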
|
gpl-2.0
| 4,155,654,913,366,671,000
| 26.714286
| 78
| 0.544129
| false
| 3.507055
| false
| false
| false
|
balta2ar/coursera-dl
|
coursera/api.py
|
1
|
18829
|
# vim: set fileencoding=utf8 :
"""
This module contains implementations of different APIs that are used by the
downloader.
"""
import os
import json
import logging
from six import iterkeys, iteritems
from six.moves.urllib_parse import quote_plus
from .utils import (BeautifulSoup, make_coursera_absolute_url,
extend_supplement_links)
from .network import get_page
from .define import (OPENCOURSE_SUPPLEMENT_URL,
OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL,
OPENCOURSE_ASSET_URL,
OPENCOURSE_ASSETS_URL,
OPENCOURSE_API_ASSETS_V1_URL,
OPENCOURSE_VIDEO_URL)
class CourseraOnDemand(object):
"""
This is a class that provides a friendly interface to extract certain
    parts of on-demand courses. On-demand is a newer format that Coursera
    uses; such courses contain '/learn/' in their URLs. This class does not support
old-style Coursera classes. This API is by no means complete.
"""
def __init__(self, session, course_id):
"""
Initialize Coursera OnDemand API.
@param session: Current session that holds cookies and so on.
@type session: requests.Session
@param course_id: Course ID from course json.
@type course_id: str
"""
self._session = session
self._course_id = course_id
def extract_links_from_lecture(self,
video_id, subtitle_language='en',
resolution='540p', assets=None):
"""
Return the download URLs of on-demand course video.
@param video_id: Video ID.
@type video_id: str
@param subtitle_language: Subtitle language.
@type subtitle_language: str
@param resolution: Preferred video resolution.
@type resolution: str
@param assets: List of assets that may present in the video.
@type assets: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
if assets is None:
assets = []
links = self._extract_videos_and_subtitles_from_lecture(
video_id, subtitle_language, resolution)
assets = self._normalize_assets(assets)
extend_supplement_links(
links, self._extract_links_from_lecture_assets(assets))
return links
def _normalize_assets(self, assets):
"""
        Perform asset normalization. For some reason, assets that are sometimes
        present in lectures have "@1" at the end of their id. Feeding such an
        "uncut" asset id to OPENCOURSE_ASSETS_URL results in an error that says:
        "Routing error: 'get-all' not implemented". To avoid that, the last
        two characters are cut off the asset id, after which the request
        works fine. It looks like the Web UI does the same.
@param assets: List of asset ids.
@type assets: [str]
@return: Normalized list of asset ids (without trailing "@1")
@rtype: [str]
"""
new_assets = []
for asset in assets:
# For example: giAxucdaEeWJTQ5WTi8YJQ@1
if len(asset) == 24:
# Turn it into: giAxucdaEeWJTQ5WTi8YJQ
asset = asset[:-2]
new_assets.append(asset)
return new_assets
def _extract_links_from_lecture_assets(self, asset_ids):
"""
Extract links to files of the asset ids.
@param asset_ids: List of asset ids.
@type asset_ids: [str]
@return: @see CourseraOnDemand._extract_links_from_text
"""
links = {}
def _add_asset(name, url, destination):
filename, extension = os.path.splitext(name)
            if extension == '':
return
extension = extension.lower().strip('.')
basename = os.path.basename(filename)
if extension not in destination:
destination[extension] = []
destination[extension].append((url, basename))
for asset_id in asset_ids:
for asset in self._get_asset_urls(asset_id):
_add_asset(asset['name'], asset['url'], links)
return links
def _get_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method may internally
use _get_open_course_asset_urls to extract `asset` element types.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}]
"""
url = OPENCOURSE_ASSETS_URL.format(id=asset_id)
page = get_page(self._session, url)
logging.debug('Parsing JSON for asset_id <%s>.', asset_id)
dom = json.loads(page)
urls = []
for element in dom['elements']:
typeName = element['typeName']
definition = element['definition']
# Elements of `asset` types look as follows:
#
# {'elements': [{'definition': {'assetId': 'gtSfvscoEeW7RxKvROGwrw',
# 'name': 'Презентация к лекции'},
# 'id': 'phxNlMcoEeWXCQ4nGuQJXw',
# 'typeName': 'asset'}],
# 'linked': None,
# 'paging': None}
#
if typeName == 'asset':
open_course_asset_id = definition['assetId']
for asset in self._get_open_course_asset_urls(open_course_asset_id):
urls.append({'name': asset['name'],
'url': asset['url']})
# Elements of `url` types look as follows:
#
# {'elements': [{'definition': {'name': 'What motivates you.pptx',
# 'url': 'https://d396qusza40orc.cloudfront.net/learning/Powerpoints/2-4A_What_motivates_you.pptx'},
# 'id': '0hixqpWJEeWQkg5xdHApow',
# 'typeName': 'url'}],
# 'linked': None,
# 'paging': None}
#
elif typeName == 'url':
urls.append({'name': definition['name'],
'url': definition['url']})
else:
logging.warning(
'Unknown asset typeName: %s\ndom: %s\n'
'If you think the downloader missed some '
'files, please report the issue here:\n'
'https://github.com/coursera-dl/coursera-dl/issues/new',
typeName, json.dumps(dom, indent=4))
return urls
def _get_open_course_asset_urls(self, asset_id):
"""
Get list of asset urls and file names. This method only works
with asset_ids extracted internally by _get_asset_urls method.
@param asset_id: Asset ID.
@type asset_id: str
@return List of dictionaries with asset file names and urls.
@rtype [{
'name': '<filename.ext>'
'url': '<url>'
}]
"""
url = OPENCOURSE_API_ASSETS_V1_URL.format(id=asset_id)
page = get_page(self._session, url)
dom = json.loads(page)
# Structure is as follows:
# elements [ {
# name
# url {
# url
return [{'name': element['name'],
'url': element['url']['url']}
for element in dom['elements']]
def _extract_videos_and_subtitles_from_lecture(self,
video_id,
subtitle_language='en',
resolution='540p'):
url = OPENCOURSE_VIDEO_URL.format(video_id=video_id)
page = get_page(self._session, url)
logging.debug('Parsing JSON for video_id <%s>.', video_id)
video_content = {}
dom = json.loads(page)
# videos
logging.info('Gathering video URLs for video_id <%s>.', video_id)
sources = dom['sources']
sources.sort(key=lambda src: src['resolution'])
sources.reverse()
# Try to select resolution requested by the user.
filtered_sources = [source
for source in sources
if source['resolution'] == resolution]
if len(filtered_sources) == 0:
# We will just use the 'vanilla' version of sources here, instead of
# filtered_sources.
logging.warn('Requested resolution %s not available for <%s>. '
'Downloading highest resolution available instead.',
resolution, video_id)
else:
logging.info('Proceeding with download of resolution %s of <%s>.',
resolution, video_id)
sources = filtered_sources
video_url = sources[0]['formatSources']['video/mp4']
video_content['mp4'] = video_url
# subtitles and transcripts
subtitle_nodes = [
('subtitles', 'srt', 'subtitle'),
('subtitlesTxt', 'txt', 'transcript'),
]
for (subtitle_node, subtitle_extension, subtitle_description) in subtitle_nodes:
logging.info('Gathering %s URLs for video_id <%s>.', subtitle_description, video_id)
subtitles = dom.get(subtitle_node)
if subtitles is not None:
if subtitle_language == 'all':
for current_subtitle_language in subtitles:
video_content[current_subtitle_language + '.' + subtitle_extension] = make_coursera_absolute_url(subtitles.get(current_subtitle_language))
else:
if subtitle_language != 'en' and subtitle_language not in subtitles:
logging.warning("%s unavailable in '%s' language for video "
"with video id: [%s], falling back to 'en' "
"%s", subtitle_description.capitalize(), subtitle_language, video_id, subtitle_description)
subtitle_language = 'en'
subtitle_url = subtitles.get(subtitle_language)
if subtitle_url is not None:
# some subtitle urls are relative!
video_content[subtitle_language + '.' + subtitle_extension] = make_coursera_absolute_url(subtitle_url)
lecture_video_content = {}
for key, value in iteritems(video_content):
lecture_video_content[key] = [(value, '')]
return lecture_video_content
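    # Editor's note: the dictionary returned above maps extension-like keys such as
    # 'mp4', 'en.srt' or 'en.txt' to a list holding a single (url, title) tuple with
    # an empty title; extract_links_from_lecture() then merges asset links into the
    # same structure via extend_supplement_links().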
def extract_links_from_programming(self, element_id):
"""
Return a dictionary with links to supplement files (pdf, csv, zip,
ipynb, html and so on) extracted from graded programming assignment.
@param element_id: Element ID to extract files from.
@type element_id: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.info('Gathering supplement URLs for element_id <%s>.', element_id)
# Assignment text (instructions) contains asset tags which describe
# supplementary files.
text = ''.join(self._extract_assignment_text(element_id))
if not text:
return {}
supplement_links = self._extract_links_from_text(text)
return supplement_links
def extract_links_from_supplement(self, element_id):
"""
Return a dictionary with supplement files (pdf, csv, zip, ipynb, html
and so on) extracted from supplement page.
@return: @see CourseraOnDemand._extract_links_from_text
"""
logging.info('Gathering supplement URLs for element_id <%s>.', element_id)
url = OPENCOURSE_SUPPLEMENT_URL.format(
course_id=self._course_id, element_id=element_id)
page = get_page(self._session, url)
dom = json.loads(page)
supplement_content = {}
# Supplement content has structure as follows:
# 'linked' {
# 'openCourseAssets.v1' [ {
# 'definition' {
# 'value'
for asset in dom['linked']['openCourseAssets.v1']:
value = asset['definition']['value']
# Supplement lecture types are known to contain both <asset> tags
# and <a href> tags (depending on the course), so we extract
# both of them.
extend_supplement_links(
supplement_content, self._extract_links_from_text(value))
return supplement_content
def _extract_asset_tags(self, text):
"""
Extract asset tags from text into a convenient form.
@param text: Text to extract asset tags from. This text contains HTML
code that is parsed by BeautifulSoup.
@type text: str
@return: Asset map.
@rtype: {
'<id>': {
'name': '<name>',
'extension': '<extension>'
},
...
}
"""
soup = BeautifulSoup(text)
asset_tags_map = {}
for asset in soup.find_all('asset'):
asset_tags_map[asset['id']] = {'name': asset['name'],
'extension': asset['extension']}
return asset_tags_map
def _extract_asset_urls(self, asset_ids):
"""
Extract asset URLs along with asset ids.
@param asset_ids: List of ids to get URLs for.
        @type asset_ids: [str]
@return: List of dictionaries with asset URLs and ids.
@rtype: [{
'id': '<id>',
'url': '<url>'
}]
"""
ids = quote_plus(','.join(asset_ids))
url = OPENCOURSE_ASSET_URL.format(ids=ids)
page = get_page(self._session, url)
dom = json.loads(page)
return [{'id': element['id'],
'url': element['url']}
for element in dom['elements']]
def _extract_assignment_text(self, element_id):
"""
Extract assignment text (instructions).
@param element_id: Element id to extract assignment instructions from.
@type element_id: str
@return: List of assignment text (instructions).
@rtype: [str]
"""
url = OPENCOURSE_PROGRAMMING_ASSIGNMENTS_URL.format(
course_id=self._course_id, element_id=element_id)
page = get_page(self._session, url)
dom = json.loads(page)
return [element['submissionLearnerSchema']['definition']
['assignmentInstructions']['definition']['value']
for element in dom['elements']]
def _extract_links_from_text(self, text):
"""
Extract supplement links from the html text. Links may be provided
in two ways:
1. <a> tags with href attribute
2. <asset> tags with id attribute (requires additional request
to get the direct URL to the asset file)
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
                ('<link2>', '<title2>')
],
'extension2': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
],
...
}
"""
supplement_links = self._extract_links_from_a_tags_in_text(text)
extend_supplement_links(
supplement_links,
self._extract_links_from_asset_tags_in_text(text))
return supplement_links
def _extract_links_from_asset_tags_in_text(self, text):
"""
Scan the text and extract asset tags and links to corresponding
files.
@param text: Page text.
@type text: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
# Extract asset tags from instructions text
asset_tags_map = self._extract_asset_tags(text)
ids = list(iterkeys(asset_tags_map))
if not ids:
return {}
# asset tags contain asset names and ids. We need to make another
# HTTP request to get asset URL.
asset_urls = self._extract_asset_urls(ids)
supplement_links = {}
# Build supplement links, providing nice titles along the way
for asset in asset_urls:
title = asset_tags_map[asset['id']]['name']
extension = asset_tags_map[asset['id']]['extension']
if extension not in supplement_links:
supplement_links[extension] = []
supplement_links[extension].append((asset['url'], title))
return supplement_links
def _extract_links_from_a_tags_in_text(self, text):
"""
Extract supplement links from the html text that contains <a> tags
with href attribute.
@param text: HTML text.
@type text: str
@return: Dictionary with supplement links grouped by extension.
@rtype: {
'<extension1>': [
('<link1>', '<title1>'),
                ('<link2>', '<title2>')
],
'extension2': [
('<link3>', '<title3>'),
('<link4>', '<title4>')
]
}
"""
soup = BeautifulSoup(text)
links = [item['href']
for item in soup.find_all('a') if 'href' in item.attrs]
links = sorted(list(set(links)))
supplement_links = {}
for link in links:
filename, extension = os.path.splitext(link)
# Some courses put links to sites in supplement section, e.g.:
# http://pandas.pydata.org/
            if extension == '':
continue
# Make lowercase and cut the leading/trailing dot
extension = extension.lower().strip('.')
basename = os.path.basename(filename)
if extension not in supplement_links:
supplement_links[extension] = []
# Putting basename into the second slot of the tuple is important
# because that will allow to download many supplements within a
# single lecture, e.g.:
# 01_slides-presented-in-this-module.pdf
# 01_slides-presented-in-this-module_Dalal-cvpr05.pdf
# 01_slides-presented-in-this-module_LM-3dtexton.pdf
supplement_links[extension].append((link, basename))
return supplement_links
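# A minimal standalone sketch (editor's addition, not part of the coursera-dl API):
# the grouping-by-extension idea used in _extract_links_from_a_tags_in_text above,
# shown on plain URL strings so the shape of the resulting dictionary is easy to see.
def _group_links_by_extension_example(links):
    grouped = {}
    for link in links:
        filename, extension = os.path.splitext(link)
        if extension == '':
            continue  # plain site links without a file extension are skipped
        extension = extension.lower().strip('.')
        basename = os.path.basename(filename)
        grouped.setdefault(extension, []).append((link, basename))
    return grouped
# _group_links_by_extension_example(['http://host/a/slides.pdf', 'http://host/'])
# would return {'pdf': [('http://host/a/slides.pdf', 'slides')]}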
|
lgpl-3.0
| -3,989,550,405,040,123,400
| 35.244701
| 162
| 0.545691
| false
| 4.291809
| false
| false
| false
|
William-Hai/SimpleDemo-python
|
file/csv/demo_csv.py
|
1
|
1083
|
# encoding=utf-8
__author__ = 'Q-Whai'
'''
DESC: Demo of basic CSV file operations
Blog: http://blog.csdn.net/lemon_tree12138
Create Date: 2016/2/25
Last Modify: 2016/3/9
version: 0.0.1
'''
import csv
# ----------------------------------------- #
# Read the contents of a CSV file           #
# ----------------------------------------- #
def read_csv(file_path):
reader = csv.reader(file(file_path, 'rb'))
for line in reader:
print(line)
# ----------------------------------------- #
# Write the given data to a CSV file        #
# ----------------------------------------- #
def write_csv(file_path, data):
writer = csv.writer(file(file_path, 'wb'))
for data_raw in data:
writer.writerow(data_raw)
# ----------------------------------------- #
# Program entry point                       #
# ----------------------------------------- #
if __name__ == '__main__':
data = [['0', '1', '2'], ['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']]
write_csv('F:/Temp/a.csv', data)
read_csv('F:/Temp/a.csv')
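# Editor's sketch (not in the original demo): the same read/write pair under Python 3,
# where the file() builtin no longer exists and csv expects text mode with newline=''.
def read_csv_py3(file_path):
    with open(file_path, newline='') as f:
        for line in csv.reader(f):
            print(line)
def write_csv_py3(file_path, data):
    with open(file_path, 'w', newline='') as f:
        writer = csv.writer(f)
        for data_raw in data:
            writer.writerow(data_raw)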
|
gpl-3.0
| 102,617,643,984,817,540
| 25.179487
| 79
| 0.371205
| false
| 3.103343
| false
| false
| false
|
Fluent-networks/floranet
|
floranet/test/unit/floranet/test_netserver.py
|
1
|
11517
|
import os
import base64
import time
from random import randrange
from mock import patch, MagicMock
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet import reactor, protocol
from twisted.internet.udp import Port
from twistar.registry import Registry
from floranet.lora.wan import LoraWAN, Rxpk
from floranet.netserver import NetServer
import floranet.lora.mac as lora_mac
from floranet.models.model import Model
from floranet.models.config import Config
from floranet.models.gateway import Gateway
from floranet.models.device import Device
from floranet.models.application import Application
import floranet.test.unit.mock_dbobject as mockDBObject
import floranet.test.unit.mock_model as mockModel
from floranet.test.unit.mock_reactor import reactorCall
class NetServerTest(unittest.TestCase):
@inlineCallbacks
def setUp(self):
"""Test setup. Creates a new NetServer
Use factory default configuration.
"""
Registry.getConfig = MagicMock(return_value=None)
# Get factory default configuration
with patch.object(Model, 'save', MagicMock()):
config = yield Config.loadFactoryDefaults()
self.server = NetServer(config)
def _test_device(self):
"""Create a test device object """
return Device(
deveui=int('0x0F0E0E0D00010209', 16),
devaddr=int('0x06000001', 16),
appeui=int('0x0A0B0C0D0A0B0C0D', 16),
nwkskey=int('0xAEB48D4C6E9EA5C48C37E4F132AA8516', 16),
appskey=int('0x7987A96F267F0A86B739EED480FC2B3C', 16),
adr= True,
tx_chan=3,
tx_datr='SF7BW125',
gw_addr='192.168.1.125',
enabled = True)
def test_checkDevaddr(self):
"""Test checkDevaddr method"""
# Check valid address
device = self._test_device()
result = self.server.checkDevaddr(device.devaddr)
self.assertTrue(result)
# Check invalid address
devaddr = int('0x11223344', 16)
result = self.server.checkDevaddr(devaddr)
self.assertFalse(result)
@inlineCallbacks
def test_getOTAADevAddrs(self):
"""Test getOTAADevAddrs method"""
device = self._test_device()
mockDBObject.return_value = device
expected = [[], [device.devaddr]]
results = []
# Test when no devices are found
with patch.object(Device, 'find', classmethod(mockDBObject.findFail)):
result = yield self.server._getOTAADevAddrs()
results.append(result)
# Test when one device is found
with patch.object(Device, 'find', classmethod(mockDBObject.findOne)):
result = yield self.server._getOTAADevAddrs()
results.append(result)
self.assertEqual(expected, results)
@inlineCallbacks
def test_getFreeOTAAddress(self):
expected = [self.server.config.otaastart,
self.server.config.otaastart+1,
self.server.config.otaaend, None]
results = []
# Test with empty OTA device list
# Mock the server method to return the devaddr list
with patch.object(self.server, '_getOTAADevAddrs',
MagicMock(return_value=[])):
result = yield self.server._getFreeOTAAddress()
results.append(result)
# Test with one OTA device
with patch.object(self.server, '_getOTAADevAddrs', MagicMock(
return_value=[self.server.config.otaastart])):
result = yield self.server._getFreeOTAAddress()
results.append(result)
# Test with last address only available
with patch.object(self.server, '_getOTAADevAddrs',MagicMock(
return_value=xrange(self.server.config.otaastart,
self.server.config.otaaend))):
result = yield self.server._getFreeOTAAddress()
results.append(result)
# Test with no address available
with patch.object(self.server, '_getOTAADevAddrs',MagicMock(
return_value=xrange(self.server.config.otaastart,
self.server.config.otaaend + 1))):
result = yield self.server._getFreeOTAAddress()
results.append(result)
self.assertEqual(expected, results)
@inlineCallbacks
def test_getActiveDevice(self):
# Include for coverage. We are essentially testing a returnValue() call.
device = self._test_device()
mockDBObject.return_value = device
expected = device.deveui
with patch.object(Device, 'find', classmethod(mockDBObject.findSuccess)):
result = yield self.server._getActiveDevice(device.devaddr)
self.assertEqual(expected, result.deveui)
def test_checkDuplicateMessage(self):
m = lora_mac.MACDataMessage()
m.mic = 1111
self.server.config.duplicateperiod = 10
expected = [True, False]
result = []
now = time.time()
# Test a successful find of the duplicate
for i in (1,10):
self.server.message_cache.append((randrange(1,1000), now - i))
self.server.message_cache.append(
(m.mic, now - self.server.config.duplicateperiod + 1))
result.append(self.server._checkDuplicateMessage(m))
# Test an unsuccessful find of the duplicate - the message's
# cache period has expired.
self.server.message_cache.remove(
(m.mic, now - self.server.config.duplicateperiod + 1))
self.server.message_cache.append(
(m.mic, now - self.server.config.duplicateperiod - 1))
result.append(self.server._checkDuplicateMessage(m))
self.assertEqual(expected, result)
def test_cleanMessageCache(self):
self.server.config.duplicateperiod = 10
# Create 10 cache entries, remove 5
now = time.time()
for i in range(1,21,2):
self.server.message_cache.append((i, now - i))
expected = 5
self.server._cleanMessageCache()
result = len(self.server.message_cache)
self.assertEqual(expected, result)
def test_manageMACCommandQueue(self):
self.server.config.macqueuelimit = 10
# Create 10 cache entries, remove 5
now = time.time()
for i in range(1,21,2):
self.server.commands.append((int(now - i), i, lora_mac.LinkCheckAns()))
expected = 5
self.server._manageMACCommandQueue()
result = len(self.server.commands)
self.assertEqual(expected, result)
@inlineCallbacks
def test_processADRRequests(self):
device = self._test_device()
device.snr_average = 3.5
device.adr_datr = None
# Test we set adr_datr device attribute properly
expected = ['SF9BW125', False]
results = []
mockDBObject.return_value = [device]
mockModel.mock_object = device
with patch.object(Device, 'all', classmethod(mockDBObject.all)), \
patch.object(device, 'update', mockModel.update), \
patch.object(self.server, '_sendLinkADRRequest', MagicMock()):
# Remove any delays
self.server.config.adrmessagetime = 0
yield self.server._processADRRequests()
results.append(device.adr_datr)
results.append(self.server.adrprocessing)
self.assertEqual(expected, results)
def _createCommands(self):
datarate = 'SF7BW125'
chmask = int('FF', 16)
return [lora_mac.LinkCheckAns(), lora_mac.LinkADRReq(datarate, 0, chmask, 6, 0)]
def test_queueMACCommand(self):
device = self._test_device()
commands = self._createCommands()
expected = [2, lora_mac.LINKCHECKANS, lora_mac.LINKADRREQ]
for c in commands:
self.server._queueMACCommand(device.deveui, c)
result = [len(self.server.commands), self.server.commands[0][2].cid,
self.server.commands[1][2].cid]
self.assertEqual(expected, result)
def test_dequeueMACCommand(self):
device = self._test_device()
commands = self._createCommands()
for c in commands:
self.server._queueMACCommand(device.deveui, c)
self.server._dequeueMACCommand(device.deveui, commands[1])
expected = [1, lora_mac.LINKCHECKANS]
result = [len(self.server.commands), self.server.commands[0][2].cid]
self.assertEqual(expected, result)
def test_scheduleDownlinkTime(self):
offset = 10
tmst = randrange(0, 4294967295 - 10000000)
expected = [tmst + 10000000, 5000000]
result = []
result.append(self.server._scheduleDownlinkTime(tmst, offset))
tmst = 4294967295 - 5000000
result.append(self.server._scheduleDownlinkTime(tmst, offset))
self.assertEqual(expected, result)
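    # Editor's note: the second case above exercises the 32-bit microsecond counter
    # wrap -- tmst + offset exceeds 4294967295, so the scheduled downlink time is
    # expected to wrap around to 5000000.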
def test_txpkResponse(self):
self.server.lora = LoraWAN(self)
self.server.lora.addGateway(Gateway(host='192.168.1.125', name='Test',
enabled=True, power=26))
tmst = randrange(0, 4294967295)
rxpk = Rxpk(tmst=tmst, chan=3, freq=915.8, datr='SF7BW125',
data="n/uSwM0LIED8X6QV0mJMjC6oc2HOWFpCfmTry", size=54)
device = self._test_device()
device.rx = self.server.band.rxparams((rxpk.chan, rxpk.datr), join=False)
gateway = self.server.lora.gateway(device.gw_addr)
expected = [(True, device.rx[1]['freq'], device.rx[1]['datr']),
(True, device.rx[2]['freq'], device.rx[2]['datr']),
(tmst + 1000000, device.rx[1]['freq'], device.rx[1]['datr']),
(tmst + 2000000, device.rx[2]['freq'], device.rx[2]['datr'])]
result = []
txpk = self.server._txpkResponse(device, rxpk.data, gateway, tmst, immediate=True)
for i in range(1,3):
result.append((txpk[i].imme, txpk[i].freq, txpk[i].datr))
txpk = self.server._txpkResponse(device, rxpk.data, gateway, tmst, immediate=False)
for i in range(1,3):
result.append((txpk[i].tmst, txpk[i].freq, txpk[i].datr))
self.assertEqual(expected, result)
def _processJoinRequest(self, request):
"""Called by test_processJoinRequest_pass and
test_processJoinRequest_fail"""
device = self._test_device()
app = self.server.config.apps[0]
# Passing join request
request = base64.b64decode("AA0MCwoNDAsKAwIBAA0ODg9IklIgzCM=")
msg = lora_mac.MACMessage.decode(request)
result = yield self.server._processJoinRequest(msg, app, device)
self.assertTrue(result)
# Failing join request
request = base64.b64decode("AA0MCwoNDAsKAwIBAA0ODg9IklIgzCX=")
msg = lora_mac.MACMessage.decode(request)
result = yield self.server._processJoinRequest(msg, app, device)
self.assertFalse(result)
|
mit
| 8,204,927,493,061,594,000
| 35.916667
| 91
| 0.60554
| false
| 3.892193
| true
| false
| false
|
npyoung/python-neo
|
neo/io/axonio.py
|
1
|
32534
|
# -*- coding: utf-8 -*-
"""
Class for reading data from pCLAMP and AxoScope
files (.abf version 1 and 2), developed by Molecular Devices/Axon Technologies.
- abf = Axon binary file
- atf is a text-based format from Axon that can be
read by AsciiIO (but this format is less efficient).
This code is a port of abfload and abf2load
written in Matlab (BSD-2-Clause licence) by :
- Copyright (c) 2009, Forrest Collman, fcollman@princeton.edu
- Copyright (c) 2004, Harald Hentschke
and available here :
http://www.mathworks.com/matlabcentral/fileexchange/22114-abf2load
Information on abf 1 and 2 formats is available here :
http://www.moleculardevices.com/pages/software/developer_info.html
This file supports the old (ABF1) and new (ABF2) formats:
ABF1 (Clampfit <=9) and ABF2 (Clampfit >=10).
All acquisition modes are supported:
- event-driven variable-length mode 1 -> returns several Segments per Block
- event-driven fixed-length mode 2 or 5 -> returns several Segments
- gap-free mode -> returns one (or several) Segments in the Block
Supported : Read
Author: sgarcia, jnowacki
Note: j.s.nowacki@gmail.com has a C++ library with SWIG bindings which also
reads abf files - would be good to cross-check
"""
import struct
import datetime
import os
from io import open, BufferedReader
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import *
from neo.io.tools import iteritems
class struct_file(BufferedReader):
def read_f(self, fmt, offset=None):
if offset is not None:
self.seek(offset)
return struct.unpack(fmt, self.read(struct.calcsize(fmt)))
def write_f(self, fmt, offset=None, *args):
if offset is not None:
self.seek(offset)
self.write(struct.pack(fmt, *args))
def reformat_integer_V1(data, nbchannel, header):
"""
reformat when dtype is int16 for ABF version 1
"""
chans = [chan_num for chan_num in
header['nADCSamplingSeq'] if chan_num >= 0]
for n, i in enumerate(chans[:nbchannel]): # respect SamplingSeq
data[:, n] /= header['fInstrumentScaleFactor'][i]
data[:, n] /= header['fSignalGain'][i]
data[:, n] /= header['fADCProgrammableGain'][i]
if header['nTelegraphEnable'][i]:
data[:, n] /= header['fTelegraphAdditGain'][i]
data[:, n] *= header['fADCRange']
data[:, n] /= header['lADCResolution']
data[:, n] += header['fInstrumentOffset'][i]
data[:, n] -= header['fSignalOffset'][i]
def reformat_integer_V2(data, nbchannel, header):
"""
reformat when dtype is int16 for ABF version 2
"""
for i in range(nbchannel):
data[:, i] /= header['listADCInfo'][i]['fInstrumentScaleFactor']
data[:, i] /= header['listADCInfo'][i]['fSignalGain']
data[:, i] /= header['listADCInfo'][i]['fADCProgrammableGain']
if header['listADCInfo'][i]['nTelegraphEnable']:
data[:, i] /= header['listADCInfo'][i]['fTelegraphAdditGain']
data[:, i] *= header['protocol']['fADCRange']
data[:, i] /= header['protocol']['lADCResolution']
data[:, i] += header['listADCInfo'][i]['fInstrumentOffset']
data[:, i] -= header['listADCInfo'][i]['fSignalOffset']
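# Editor's note: both reformat helpers above apply the same chain of corrections to
# the raw int16 samples -- divide by the instrument, signal and programmable gains
# (plus the telegraph gain when enabled), scale by fADCRange / lADCResolution, then
# apply the instrument and signal offsets -- only the header layout they read differs.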
def clean_string(s):
s = s.rstrip(b'\x00')
s = s.rstrip(b' ')
return s
class AxonIO(BaseIO):
"""
Class for reading abf (axon binary file) file.
Usage:
>>> from neo import io
>>> r = io.AxonIO(filename='File_axon_1.abf')
>>> bl = r.read_block(lazy=False, cascade=True)
>>> print bl.segments
[<neo.core.segment.Segment object at 0x105516fd0>]
>>> print bl.segments[0].analogsignals
[<AnalogSignal(array([2.18811035, 2.19726562, 2.21252441, ...,
1.33056641, 1.3458252, 1.3671875], dtype=float32) * pA,
[0.0 s, 191.2832 s], sampling rate: 10000.0 Hz)>]
>>> print bl.segments[0].eventarrays
[]
"""
is_readable = True
is_writable = False
supported_objects = [Block, Segment, AnalogSignal, EventArray]
readable_objects = [Block]
writeable_objects = []
has_header = False
is_streameable = False
read_params = {Block: []}
write_params = None
name = 'Axon'
extensions = ['abf']
mode = 'file'
def __init__(self, filename=None):
"""
This class read a abf file.
Arguments:
filename : the filename to read
"""
BaseIO.__init__(self)
self.filename = filename
def read_block(self, lazy=False, cascade=True):
header = self.read_header()
version = header['fFileVersionNumber']
bl = Block()
bl.file_origin = os.path.basename(self.filename)
bl.annotate(abf_version=version)
# date and time
if version < 2.:
YY = 1900
MM = 1
DD = 1
hh = int(header['lFileStartTime'] / 3600.)
mm = int((header['lFileStartTime'] - hh * 3600) / 60)
ss = header['lFileStartTime'] - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
ss = int(ss)
elif version >= 2.:
YY = int(header['uFileStartDate'] / 10000)
MM = int((header['uFileStartDate'] - YY * 10000) / 100)
DD = int(header['uFileStartDate'] - YY * 10000 - MM * 100)
hh = int(header['uFileStartTimeMS'] / 1000. / 3600.)
mm = int((header['uFileStartTimeMS'] / 1000. - hh * 3600) / 60)
ss = header['uFileStartTimeMS'] / 1000. - hh * 3600 - mm * 60
ms = int(np.mod(ss, 1) * 1e6)
ss = int(ss)
bl.rec_datetime = datetime.datetime(YY, MM, DD, hh, mm, ss, ms)
if not cascade:
return bl
# file format
if header['nDataFormat'] == 0:
dt = np.dtype('i2')
elif header['nDataFormat'] == 1:
dt = np.dtype('f4')
if version < 2.:
nbchannel = header['nADCNumChannels']
headOffset = header['lDataSectionPtr'] * BLOCKSIZE +\
header['nNumPointsIgnored'] * dt.itemsize
totalsize = header['lActualAcqLength']
elif version >= 2.:
nbchannel = header['sections']['ADCSection']['llNumEntries']
headOffset = header['sections']['DataSection']['uBlockIndex'] *\
BLOCKSIZE
totalsize = header['sections']['DataSection']['llNumEntries']
data = np.memmap(self.filename, dt, 'r',
shape=(totalsize,), offset=headOffset)
# 3 possible modes
if version < 2.:
mode = header['nOperationMode']
elif version >= 2.:
mode = header['protocol']['nOperationMode']
#~ print 'mode', mode
if (mode == 1) or (mode == 2) or (mode == 5) or (mode == 3):
# event-driven variable-length mode (mode 1)
# event-driven fixed-length mode (mode 2 or 5)
            # gap-free mode 3 can contain several episodes (strange but possible)
# read sweep pos
if version < 2.:
nbepisod = header['lSynchArraySize']
offsetEpisod = header['lSynchArrayPtr'] * BLOCKSIZE
elif version >= 2.:
SAS = header['sections']['SynchArraySection']
nbepisod = SAS['llNumEntries']
offsetEpisod = SAS['uBlockIndex'] * BLOCKSIZE
if nbepisod > 0:
episodArray = np.memmap(self.filename, [('offset', 'i4'),
('len', 'i4')], 'r', shape=(nbepisod),
offset=offsetEpisod)
else:
episodArray = np.empty((1), [('offset', 'i4'), ('len', 'i4')],)
episodArray[0]['len'] = data.size
episodArray[0]['offset'] = 0
# sampling_rate
if version < 2.:
sampling_rate = 1. / (header['fADCSampleInterval'] *
nbchannel * 1.e-6) * pq.Hz
elif version >= 2.:
sampling_rate = 1.e6 / \
header['protocol']['fADCSequenceInterval'] * pq.Hz
# construct block
# one sweep = one segment in a block
pos = 0
for j in range(episodArray.size):
seg = Segment(index=j)
length = episodArray[j]['len']
if version < 2.:
fSynchTimeUnit = header['fSynchTimeUnit']
elif version >= 2.:
fSynchTimeUnit = header['protocol']['fSynchTimeUnit']
if (fSynchTimeUnit != 0) and (mode == 1):
length /= fSynchTimeUnit
if not lazy:
subdata = data[pos:pos+length]
subdata = subdata.reshape((subdata.size/nbchannel,
nbchannel)).astype('f')
if dt == np.dtype('i2'):
if version < 2.:
reformat_integer_V1(subdata, nbchannel, header)
elif version >= 2.:
reformat_integer_V2(subdata, nbchannel, header)
pos += length
if version < 2.:
chans = [chan_num for chan_num in
header['nADCSamplingSeq'] if chan_num >= 0]
else:
chans = range(nbchannel)
for n, i in enumerate(chans[:nbchannel]): # fix SamplingSeq
if version < 2.:
name = header['sADCChannelName'][i].replace(b' ', b'')
unit = header['sADCUnits'][i].replace(b'\xb5', b'u').\
replace(b' ', b'').decode('utf-8') # \xb5 is µ
num = header['nADCPtoLChannelMap'][i]
elif version >= 2.:
lADCIi = header['listADCInfo'][i]
name = lADCIi['ADCChNames'].replace(b' ', b'')
unit = lADCIi['ADCChUnits'].replace(b'\xb5', b'u').\
replace(b' ', b'').decode('utf-8')
num = header['listADCInfo'][i]['nADCNum']
t_start = float(episodArray[j]['offset']) / sampling_rate
t_start = t_start.rescale('s')
try:
pq.Quantity(1, unit)
except:
unit = ''
if lazy:
signal = [] * pq.Quantity(1, unit)
else:
signal = pq.Quantity(subdata[:, n], unit)
anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
t_start=t_start,
name=str(name),
channel_index=int(num))
if lazy:
anaSig.lazy_shape = length / nbchannel
seg.analogsignals.append(anaSig)
bl.segments.append(seg)
        if mode in [3, 5]:  # TODO check if tags exist in other modes
            # tags are EventArrays that should be attached to the Block;
            # they are attached to the first Segment
times = []
labels = []
comments = []
for i, tag in enumerate(header['listTag']):
times.append(tag['lTagTime']/sampling_rate)
labels.append(str(tag['nTagType']))
comments.append(clean_string(tag['sComment']))
times = np.array(times)
labels = np.array(labels, dtype='S')
comments = np.array(comments, dtype='S')
# attach all tags to the first segment.
seg = bl.segments[0]
if lazy:
ea = EventArray(times=[] * pq.s,
labels=np.array([], dtype='S'))
ea.lazy_shape = len(times)
else:
ea = EventArray(times=times*pq.s,
labels=labels, comments=comments)
seg.eventarrays.append(ea)
bl.create_many_to_one_relationship()
return bl
def read_header(self,):
"""
read the header of the file
        The strategy here differs from the original Matlab script.
        For ABF2, the original script completes the header with
        information that is located in other structures.
        For ABF2 this function returns a header with sub-dicts:
sections (ABF2)
protocol (ABF2)
listTags (ABF1&2)
listADCInfo (ABF2)
listDACInfo (ABF2)
dictEpochInfoPerDAC (ABF2)
that contain more information.
"""
fid = struct_file(open(self.filename, 'rb')) # fix for py3
# version
fFileSignature = fid.read(4)
if fFileSignature == b'ABF ': # fix for p3 where read returns bytes
headerDescription = headerDescriptionV1
elif fFileSignature == b'ABF2':
headerDescription = headerDescriptionV2
else:
return None
# construct dict
header = {}
for key, offset, fmt in headerDescription:
val = fid.read_f(fmt, offset=offset)
if len(val) == 1:
header[key] = val[0]
else:
header[key] = np.array(val)
# correction of version number and starttime
if fFileSignature == b'ABF ':
header['lFileStartTime'] = header['lFileStartTime'] +\
header['nFileStartMillisecs'] * .001
elif fFileSignature == b'ABF2':
n = header['fFileVersionNumber']
header['fFileVersionNumber'] = n[3] + 0.1 * n[2] +\
0.01 * n[1] + 0.001 * n[0]
header['lFileStartTime'] = header['uFileStartTimeMS'] * .001
if header['fFileVersionNumber'] < 2.:
# tags
listTag = []
for i in range(header['lNumTagEntries']):
fid.seek(header['lTagSectionPtr'] + i * 64)
tag = {}
for key, fmt in TagInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
tag[key] = val[0]
else:
tag[key] = np.array(val)
listTag.append(tag)
header['listTag'] = listTag
#protocol name formatting #TODO move to read_protocol?
header['sProtocolPath'] = clean_string(header['sProtocolPath'])
header['sProtocolPath'] = header['sProtocolPath'].\
replace(b'\\', b'/')
elif header['fFileVersionNumber'] >= 2.:
# in abf2 some info are in other place
# sections
sections = {}
for s, sectionName in enumerate(sectionNames):
uBlockIndex, uBytes, llNumEntries =\
fid.read_f('IIl', offset=76 + s * 16)
sections[sectionName] = {}
sections[sectionName]['uBlockIndex'] = uBlockIndex
sections[sectionName]['uBytes'] = uBytes
sections[sectionName]['llNumEntries'] = llNumEntries
header['sections'] = sections
# strings sections
# hack for reading channels names and units
fid.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
bigString = fid.read(sections['StringsSection']['uBytes'])
goodstart = bigString.lower().find(b'clampex')
if goodstart == -1:
goodstart = bigString.lower().find(b'axoscope')
bigString = bigString[goodstart:]
strings = bigString.split(b'\x00')
# ADC sections
header['listADCInfo'] = []
for i in range(sections['ADCSection']['llNumEntries']):
# read ADCInfo
fid.seek(sections['ADCSection']['uBlockIndex'] *
BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
ADCInfo = {}
for key, fmt in ADCInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
ADCInfo[key] = val[0]
else:
ADCInfo[key] = np.array(val)
ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex']
- 1]
ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
header['listADCInfo'].append(ADCInfo)
# protocol sections
protocol = {}
fid.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
for key, fmt in protocolInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
protocol[key] = val[0]
else:
protocol[key] = np.array(val)
header['protocol'] = protocol
# tags
listTag = []
for i in range(sections['TagSection']['llNumEntries']):
fid.seek(sections['TagSection']['uBlockIndex'] *
BLOCKSIZE + sections['TagSection']['uBytes'] * i)
tag = {}
for key, fmt in TagInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
tag[key] = val[0]
else:
tag[key] = np.array(val)
listTag.append(tag)
header['listTag'] = listTag
# DAC sections
header['listDACInfo'] = []
for i in range(sections['DACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['DACSection']['uBlockIndex'] *
BLOCKSIZE + sections['DACSection']['uBytes'] * i)
DACInfo = {}
for key, fmt in DACInfoDescription:
val = fid.read_f(fmt)
if len(val) == 1:
DACInfo[key] = val[0]
else:
DACInfo[key] = np.array(val)
DACInfo['DACChNames'] = strings[DACInfo['lDACChannelNameIndex']
- 1]
DACInfo['DACChUnits'] = strings[
DACInfo['lDACChannelUnitsIndex'] - 1]
header['listDACInfo'].append(DACInfo)
# EpochPerDAC sections
# header['dictEpochInfoPerDAC'] is dict of dicts:
# - the first index is the DAC number
# - the second index is the epoch number
# It has to be done like that because data may not exist
# and may not be in sorted order
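# For illustration only (the DAC and epoch numbers below are hypothetical):
#   header['dictEpochInfoPerDAC'][0][2]['fEpochInitLevel']
# would be the initial level of epoch 2 driven by DAC 0, if that entry exists.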
header['dictEpochInfoPerDAC'] = {}
for i in range(sections['EpochPerDACSection']['llNumEntries']):
# read DACInfo
fid.seek(sections['EpochPerDACSection']['uBlockIndex'] *
BLOCKSIZE +
sections['EpochPerDACSection']['uBytes'] * i)
EpochInfoPerDAC = {}
for key, fmt in EpochInfoPerDACDescription:
val = fid.read_f(fmt)
if len(val) == 1:
EpochInfoPerDAC[key] = val[0]
else:
EpochInfoPerDAC[key] = np.array(val)
DACNum = EpochInfoPerDAC['nDACNum']
EpochNum = EpochInfoPerDAC['nEpochNum']
# Checking if the key exists, if not, the value is empty
# so we have to create empty dict to populate
if DACNum not in header['dictEpochInfoPerDAC']:
header['dictEpochInfoPerDAC'][DACNum] = {}
header['dictEpochInfoPerDAC'][DACNum][EpochNum] =\
EpochInfoPerDAC
fid.close()
return header
def read_protocol(self):
"""
Read the protocol waveform of the file, if present;
function works with ABF2 only; for ABF1 the protocol would have to be
reconstructed from the header, which is not done here.
Returns: a list of segments (one for every episode),
each with a list of analog signals (one for every DAC).
"""
header = self.read_header()
if header['fFileVersionNumber'] < 2.:
raise IOError("Protocol section is only present in ABF2 files.")
nADC = header['sections']['ADCSection']['llNumEntries'] # n ADC chans
nDAC = header['sections']['DACSection']['llNumEntries'] # n DAC chans
nSam = header['protocol']['lNumSamplesPerEpisode'] // nADC # samples/ep
nEpi = header['lActualEpisodes']
sampling_rate = 1.e6/header['protocol']['fADCSequenceInterval'] * pq.Hz
# Make a list of segments with analog signals with just holding levels
# List of segments relates to number of episodes, as for recorded data
segments = []
for epiNum in range(nEpi):
seg = Segment(index=epiNum)
# One analog signal for each DAC in segment (episode)
for DACNum in range(nDAC):
t_start = 0 * pq.s # TODO: Possibly check with episode array
name = header['listDACInfo'][DACNum]['DACChNames']
unit = header['listDACInfo'][DACNum]['DACChUnits'].\
replace(b'\xb5', b'u') # \xb5 is µ
signal = np.ones(nSam) *\
header['listDACInfo'][DACNum]['fDACHoldingLevel'] *\
pq.Quantity(1, unit)
anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
t_start=t_start, name=str(name),
channel_index=DACNum)
# If there are epoch infos for this DAC
if DACNum in header['dictEpochInfoPerDAC']:
# Save last sample index
i_last = int(nSam * 15625 / 10**6)
# TODO guess for first holding
# Go over EpochInfoPerDAC and change the analog signal
# according to the epochs
epochInfo = header['dictEpochInfoPerDAC'][DACNum]
for epochNum, epoch in iteritems(epochInfo):
i_begin = i_last
i_end = i_last + epoch['lEpochInitDuration'] +\
epoch['lEpochDurationInc'] * epiNum
dif = i_end-i_begin
anaSig[i_begin:i_end] = np.ones(len(range(dif))) *\
pq.Quantity(1, unit) * (epoch['fEpochInitLevel'] +
epoch['fEpochLevelInc'] *
epiNum)
i_last += epoch['lEpochInitDuration']
seg.analogsignals.append(anaSig)
segments.append(seg)
return segments
BLOCKSIZE = 512
headerDescriptionV1 = [
('fFileSignature', 0, '4s'),
('fFileVersionNumber', 4, 'f'),
('nOperationMode', 8, 'h'),
('lActualAcqLength', 10, 'i'),
('nNumPointsIgnored', 14, 'h'),
('lActualEpisodes', 16, 'i'),
('lFileStartTime', 24, 'i'),
('lDataSectionPtr', 40, 'i'),
('lTagSectionPtr', 44, 'i'),
('lNumTagEntries', 48, 'i'),
('lSynchArrayPtr', 92, 'i'),
('lSynchArraySize', 96, 'i'),
('nDataFormat', 100, 'h'),
('nADCNumChannels', 120, 'h'),
('fADCSampleInterval', 122, 'f'),
('fSynchTimeUnit', 130, 'f'),
('lNumSamplesPerEpisode', 138, 'i'),
('lPreTriggerSamples', 142, 'i'),
('lEpisodesPerRun', 146, 'i'),
('fADCRange', 244, 'f'),
('lADCResolution', 252, 'i'),
('nFileStartMillisecs', 366, 'h'),
('nADCPtoLChannelMap', 378, '16h'),
('nADCSamplingSeq', 410, '16h'),
('sADCChannelName', 442, '10s'*16),
('sADCUnits', 602, '8s'*16),
('fADCProgrammableGain', 730, '16f'),
('fInstrumentScaleFactor', 922, '16f'),
('fInstrumentOffset', 986, '16f'),
('fSignalGain', 1050, '16f'),
('fSignalOffset', 1114, '16f'),
('nDigitalEnable', 1436, 'h'),
('nActiveDACChannel', 1440, 'h'),
('nDigitalHolding', 1584, 'h'),
('nDigitalInterEpisode', 1586, 'h'),
('nDigitalValue', 2588, '10h'),
('lDACFilePtr', 2048, '2i'),
('lDACFileNumEpisodes', 2056, '2i'),
('fDACCalibrationFactor', 2074, '4f'),
('fDACCalibrationOffset', 2090, '4f'),
('nWaveformEnable', 2296, '2h'),
('nWaveformSource', 2300, '2h'),
('nInterEpisodeLevel', 2304, '2h'),
('nEpochType', 2308, '20h'),
('fEpochInitLevel', 2348, '20f'),
('fEpochLevelInc', 2428, '20f'),
('lEpochInitDuration', 2508, '20i'),
('lEpochDurationInc', 2588, '20i'),
('nTelegraphEnable', 4512, '16h'),
('fTelegraphAdditGain', 4576, '16f'),
('sProtocolPath', 4898, '384s'),
]
headerDescriptionV2 = [
('fFileSignature', 0, '4s'),
('fFileVersionNumber', 4, '4b'),
('uFileInfoSize', 8, 'I'),
('lActualEpisodes', 12, 'I'),
('uFileStartDate', 16, 'I'),
('uFileStartTimeMS', 20, 'I'),
('uStopwatchTime', 24, 'I'),
('nFileType', 28, 'H'),
('nDataFormat', 30, 'H'),
('nSimultaneousScan', 32, 'H'),
('nCRCEnable', 34, 'H'),
('uFileCRC', 36, 'I'),
('FileGUID', 40, 'I'),
('uCreatorVersion', 56, 'I'),
('uCreatorNameIndex', 60, 'I'),
('uModifierVersion', 64, 'I'),
('uModifierNameIndex', 68, 'I'),
('uProtocolPathIndex', 72, 'I'),
]
sectionNames = [
'ProtocolSection',
'ADCSection',
'DACSection',
'EpochSection',
'ADCPerDACSection',
'EpochPerDACSection',
'UserListSection',
'StatsRegionSection',
'MathSection',
'StringsSection',
'DataSection',
'TagSection',
'ScopeSection',
'DeltaSection',
'VoiceTagSection',
'SynchArraySection',
'AnnotationSection',
'StatsSection',
]
protocolInfoDescription = [
('nOperationMode', 'h'),
('fADCSequenceInterval', 'f'),
('bEnableFileCompression', 'b'),
('sUnused1', '3s'),
('uFileCompressionRatio', 'I'),
('fSynchTimeUnit', 'f'),
('fSecondsPerRun', 'f'),
('lNumSamplesPerEpisode', 'i'),
('lPreTriggerSamples', 'i'),
('lEpisodesPerRun', 'i'),
('lRunsPerTrial', 'i'),
('lNumberOfTrials', 'i'),
('nAveragingMode', 'h'),
('nUndoRunCount', 'h'),
('nFirstEpisodeInRun', 'h'),
('fTriggerThreshold', 'f'),
('nTriggerSource', 'h'),
('nTriggerAction', 'h'),
('nTriggerPolarity', 'h'),
('fScopeOutputInterval', 'f'),
('fEpisodeStartToStart', 'f'),
('fRunStartToStart', 'f'),
('lAverageCount', 'i'),
('fTrialStartToStart', 'f'),
('nAutoTriggerStrategy', 'h'),
('fFirstRunDelayS', 'f'),
('nChannelStatsStrategy', 'h'),
('lSamplesPerTrace', 'i'),
('lStartDisplayNum', 'i'),
('lFinishDisplayNum', 'i'),
('nShowPNRawData', 'h'),
('fStatisticsPeriod', 'f'),
('lStatisticsMeasurements', 'i'),
('nStatisticsSaveStrategy', 'h'),
('fADCRange', 'f'),
('fDACRange', 'f'),
('lADCResolution', 'i'),
('lDACResolution', 'i'),
('nExperimentType', 'h'),
('nManualInfoStrategy', 'h'),
('nCommentsEnable', 'h'),
('lFileCommentIndex', 'i'),
('nAutoAnalyseEnable', 'h'),
('nSignalType', 'h'),
('nDigitalEnable', 'h'),
('nActiveDACChannel', 'h'),
('nDigitalHolding', 'h'),
('nDigitalInterEpisode', 'h'),
('nDigitalDACChannel', 'h'),
('nDigitalTrainActiveLogic', 'h'),
('nStatsEnable', 'h'),
('nStatisticsClearStrategy', 'h'),
('nLevelHysteresis', 'h'),
('lTimeHysteresis', 'i'),
('nAllowExternalTags', 'h'),
('nAverageAlgorithm', 'h'),
('fAverageWeighting', 'f'),
('nUndoPromptStrategy', 'h'),
('nTrialTriggerSource', 'h'),
('nStatisticsDisplayStrategy', 'h'),
('nExternalTagType', 'h'),
('nScopeTriggerOut', 'h'),
('nLTPType', 'h'),
('nAlternateDACOutputState', 'h'),
('nAlternateDigitalOutputState', 'h'),
('fCellID', '3f'),
('nDigitizerADCs', 'h'),
('nDigitizerDACs', 'h'),
('nDigitizerTotalDigitalOuts', 'h'),
('nDigitizerSynchDigitalOuts', 'h'),
('nDigitizerType', 'h'),
]
ADCInfoDescription = [
('nADCNum', 'h'),
('nTelegraphEnable', 'h'),
('nTelegraphInstrument', 'h'),
('fTelegraphAdditGain', 'f'),
('fTelegraphFilter', 'f'),
('fTelegraphMembraneCap', 'f'),
('nTelegraphMode', 'h'),
('fTelegraphAccessResistance', 'f'),
('nADCPtoLChannelMap', 'h'),
('nADCSamplingSeq', 'h'),
('fADCProgrammableGain', 'f'),
('fADCDisplayAmplification', 'f'),
('fADCDisplayOffset', 'f'),
('fInstrumentScaleFactor', 'f'),
('fInstrumentOffset', 'f'),
('fSignalGain', 'f'),
('fSignalOffset', 'f'),
('fSignalLowpassFilter', 'f'),
('fSignalHighpassFilter', 'f'),
('nLowpassFilterType', 'b'),
('nHighpassFilterType', 'b'),
('fPostProcessLowpassFilter', 'f'),
('nPostProcessLowpassFilterType', 'c'),
('bEnabledDuringPN', 'b'),
('nStatsChannelPolarity', 'h'),
('lADCChannelNameIndex', 'i'),
('lADCUnitsIndex', 'i'),
]
TagInfoDescription = [
('lTagTime', 'i'),
('sComment', '56s'),
('nTagType', 'h'),
('nVoiceTagNumber_or_AnnotationIndex', 'h'),
]
DACInfoDescription = [
('nDACNum', 'h'),
('nTelegraphDACScaleFactorEnable', 'h'),
('fInstrumentHoldingLevel', 'f'),
('fDACScaleFactor', 'f'),
('fDACHoldingLevel', 'f'),
('fDACCalibrationFactor', 'f'),
('fDACCalibrationOffset', 'f'),
('lDACChannelNameIndex', 'i'),
('lDACChannelUnitsIndex', 'i'),
('lDACFilePtr', 'i'),
('lDACFileNumEpisodes', 'i'),
('nWaveformEnable', 'h'),
('nWaveformSource', 'h'),
('nInterEpisodeLevel', 'h'),
('fDACFileScale', 'f'),
('fDACFileOffset', 'f'),
('lDACFileEpisodeNum', 'i'),
('nDACFileADCNum', 'h'),
('nConditEnable', 'h'),
('lConditNumPulses', 'i'),
('fBaselineDuration', 'f'),
('fBaselineLevel', 'f'),
('fStepDuration', 'f'),
('fStepLevel', 'f'),
('fPostTrainPeriod', 'f'),
('fPostTrainLevel', 'f'),
('nMembTestEnable', 'h'),
('nLeakSubtractType', 'h'),
('nPNPolarity', 'h'),
('fPNHoldingLevel', 'f'),
('nPNNumADCChannels', 'h'),
('nPNPosition', 'h'),
('nPNNumPulses', 'h'),
('fPNSettlingTime', 'f'),
('fPNInterpulse', 'f'),
('nLTPUsageOfDAC', 'h'),
('nLTPPresynapticPulses', 'h'),
('lDACFilePathIndex', 'i'),
('fMembTestPreSettlingTimeMS', 'f'),
('fMembTestPostSettlingTimeMS', 'f'),
('nLeakSubtractADCIndex', 'h'),
('sUnused', '124s'),
]
EpochInfoPerDACDescription = [
('nEpochNum', 'h'),
('nDACNum', 'h'),
('nEpochType', 'h'),
('fEpochInitLevel', 'f'),
('fEpochLevelInc', 'f'),
('lEpochInitDuration', 'i'),
('lEpochDurationInc', 'i'),
('lEpochPulsePeriod', 'i'),
('lEpochPulseWidth', 'i'),
('sUnused', '18s'),
]
EpochInfoDescription = [
('nEpochNum', 'h'),
('nDigitalValue', 'h'),
('nDigitalTrainValue', 'h'),
('nAlternateDigitalValue', 'h'),
('nAlternateDigitalTrainValue', 'h'),
('bEpochCompression', 'b'),
('sUnused', '21s'),
]
|
bsd-3-clause
| -1,758,531,858,273,080,600
| 35.436133
| 79
| 0.507408
| false
| 3.785432
| false
| false
| false
|
savex/spectra
|
tools/salt_networks.py
|
1
|
2298
|
import re
import sys
import subprocess
import json
def shell(command):
_ps = subprocess.Popen(
command.split(),
stdout=subprocess.PIPE
).communicate()[0].decode()
return _ps
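# cut_option: return the value that follows _param in _options_list together
# with the remaining options; returns ("n/a", unchanged list) if _param is absent.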
def cut_option(_param, _options_list):
_option = "n/a"
_result_list = []
if _param in _options_list:
_index = _options_list.index(_param)
_option = _options_list[_index+1]
_l1 = _options_list[:_index]
_l2 = _options_list[_index+2:]
_result_list = _l1 + _l2
else:
_result_list = _options_list
return _option, _result_list
def get_ifs_data():
_ifs_raw = shell('ip a')
if_start = re.compile(r"^[0-9]+: .*: <.*> .*$")
if_ipv4 = re.compile(r"^\s{4}inet .*$")
_ifs = {}
_if_name = None
for line in _ifs_raw.splitlines():
_if_data = {}
if if_start.match(line):
_tmp = line.split(':')
_if_name = _tmp[1].strip()
_if_options = _tmp[2].strip().split(' ')
_if_data['order'] = _tmp[0]
_if_data['mtu'], _if_options = cut_option("mtu", _if_options)
_if_data['qlen'], _if_options = cut_option("qlen", _if_options)
_if_data['state'], _if_options = cut_option("state", _if_options)
_if_data['other'] = _if_options
_if_data['ipv4'] = {}
_ifs[_if_name] = _if_data
elif if_ipv4.match(line):
if _if_name is None:
continue
else:
_tmp = line.strip().split(' ', 2)
_ip = _tmp[1]
_options = _tmp[2].split(' ')
_brd, _options = cut_option("brd", _options)
# TODO: Parse other options, mask, brd, etc...
_ifs[_if_name]['ipv4'][_ip] = {}
_ifs[_if_name]['ipv4'][_ip]['brd'] = _brd
_ifs[_if_name]['ipv4'][_ip]['other'] = _options
return _ifs
ifs_data = get_ifs_data()
# _ifs = sorted(ifs_data.keys())
# _ifs.remove("lo")
# for _idx in range(len(_ifs)):
# print("{:25}: {:20} {:10} {:5}".format(
# _ifs[_idx],
# " ".join(ifs_data[_ifs[_idx]]['ipv4'].keys()),
# ifs_data[_ifs[_idx]]['mtu'],
# ifs_data[_ifs[_idx]]['state']
# ))
buff = json.dumps(ifs_data)
sys.stdout.write(buff)
|
apache-2.0
| -6,106,116,033,646,082,000
| 33.313433
| 77
| 0.483899
| false
| 3.113821
| false
| false
| false
|
nschloe/quadpy
|
src/quadpy/c1/_fejer.py
|
1
|
1547
|
import numpy as np
from ..helpers import article
from ._helpers import C1Scheme
source = article(
authors=["J. Waldvogel"],
title="Fast Construction of the Fejér and Clenshaw–Curtis Quadrature Rules",
journal="BIT Numerical Mathematics",
month="mar",
year="2006",
volume="46",
number="1",
pages="195–202",
url="https://doi.org/10.1007/s10543-006-0045-4",
)
def fejer_1(n):
degree = n
points = -np.cos(np.pi * (np.arange(n) + 0.5) / n)
# n -= 1
N = np.arange(1, n, 2)
length = len(N)
m = n - length
K = np.arange(m)
v0 = np.concatenate(
[
2 * np.exp(1j * np.pi * K / n) / (1 - 4 * K ** 2),
np.zeros(length + 1),
]
)
v1 = v0[:-1] + np.conjugate(v0[:0:-1])
w = np.fft.ifft(v1)
assert max(w.imag) < 1.0e-15
weights = w.real
return C1Scheme("Fejér 1", degree, weights, points, source)
def fejer_2(n):
degree = n
points = -np.cos((np.pi * np.arange(1, n + 1)) / (n + 1))
n += 1
N = np.arange(1, n, 2)
length = len(N)
m = n - length
v0 = np.concatenate([2.0 / N / (N - 2), np.array([1.0 / N[-1]]), np.zeros(m)])
v2 = -v0[:-1] - v0[:0:-1]
w = np.fft.ihfft(v2)
assert max(w.imag) < 1.0e-15
w = w.real
if n % 2 == 1:
weights = np.concatenate([w, w[::-1]])
else:
weights = np.concatenate([w, w[len(w) - 2 :: -1]])
# cut off first and last
weights = weights[1:-1]
return C1Scheme("Fejér 2", degree, weights, points, source)
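# Hedged usage sketch, assuming C1Scheme exposes the `points` and `weights`
# arrays passed to it above:
#   scheme = fejer_2(20)
#   approx = np.dot(scheme.weights, np.exp(scheme.points))
#   # approximates the integral of exp(x) over [-1, 1], i.e. e - 1/e ~ 2.3504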
|
mit
| -6,587,747,661,665,124,000
| 21.647059
| 82
| 0.523377
| false
| 2.66436
| false
| false
| false
|
felixonmars/suds-ng
|
suds/bindings/document.py
|
1
|
5736
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for the (WS) SOAP I{document/literal}.
"""
from logging import getLogger
from suds.bindings.binding import Binding
from suds.sax.element import Element
log = getLogger(__name__)
class Document(Binding):
"""
The document/literal style. Literal is the only (@use) supported
since document/encoded is pretty much dead.
Although the soap specification supports multiple documents within the soap
<body/>, it is very uncommon. As such, suds presents an I{RPC} view of
service methods defined with a single document parameter. This is done so
that the user can pass individual parameters instead of one single document.
To support the complete specification, service methods defined with multiple
documents (multiple message parts) must present a I{document} view for that method.
"""
def bodycontent(self, method, args, kwargs):
#
# The I{wrapped} vs I{bare} style is detected in 2 ways.
# If there is 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
if not len(method.soap.input.body.parts):
return ()
wrapped = method.soap.input.body.wrapped
if wrapped:
pts = self.bodypart_types(method)
root = self.document(pts[0])
else:
root = []
n = 0
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
n += 1
p = self.mkparam(method, pd, value)
if p is None:
continue
if not wrapped:
ns = pd[1].namespace('ns0')
p.setPrefix(ns[0], ns[1])
root.append(p)
return root
def replycontent(self, method, body):
wrapped = method.soap.output.body.wrapped
if wrapped:
return body[0].children
else:
return body.children
def document(self, wrapper):
"""
Get the document root. For I{document/literal}, this is the
name of the wrapper element qualified by the schema tns.
@param wrapper: The method name.
@type wrapper: L{xsd.sxbase.SchemaObject}
@return: A root element.
@rtype: L{Element}
"""
tag = wrapper[1].name
ns = wrapper[1].namespace('ns0')
d = Element(tag, ns=ns)
return d
def mkparam(self, method, pdef, object):
#
# Expand list parameters into individual parameters
# each with the type information. This is because in document
# arrays are simply unbounded elements.
#
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkparam(method, pdef, item))
return tags
else:
return Binding.mkparam(self, method, pdef, object)
def param_defs(self, method):
#
# Get parameter definitions for document literal.
# The I{wrapped} vs I{bare} style is detected in 2 ways.
# If there is 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
pts = self.bodypart_types(method)
wrapped = method.soap.input.body.wrapped
if not wrapped:
return pts
result = []
# wrapped
for p in pts:
resolved = p[1].resolve()
for child, ancestry in resolved:
if child.isattr():
continue
if self.bychoice(ancestry):
log.debug(
'%s\ncontained by <choice/>, excluded as param for %s()',
child,
method.name)
continue
result.append((child.name, child))
return result
def returned_types(self, method):
result = []
wrapped = method.soap.output.body.wrapped
rts = self.bodypart_types(method, input=False)
if wrapped:
for pt in rts:
resolved = pt.resolve(nobuiltin=True)
for child, ancestry in resolved:
result.append(child)
break
else:
result += rts
return result
def bychoice(self, ancestry):
"""
The ancestry contains a <choice/>
@param ancestry: A list of ancestors.
@type ancestry: list
@return: True if contains <choice/>
@rtype: boolean
"""
for x in ancestry:
if x.choice():
return True
return False
|
lgpl-3.0
| 1,296,709,443,985,954,300
| 35.075472
| 90
| 0.584902
| false
| 4.325792
| false
| false
| false
|
davisd50/sparc.i18n
|
setup.py
|
1
|
1392
|
from setuptools import setup, find_packages
import os
version = '0.0.1'
setup(name='sparc.i18n',
version=version,
description="i18n components for the SPARC platform",
long_description=open("README.md").read() + "\n" +
open("HISTORY.txt").read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
],
keywords=['zca'],
author='David Davis',
author_email='davisd50@gmail.com',
url='https://github.com/davisd50/sparc.i18n',
download_url = '',
license='MIT',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['sparc'],
include_package_data=True,
package_data = {
'': ['*.zcml']
},
zip_safe=False,
install_requires=[
'setuptools',
'zope.interface',
'zope.component',
'zope.i18nmessageid'
# -*- Extra requirements: -*-
],
tests_require=[
'sparc.testing'
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
mit
| 7,376,492,356,747,145,000
| 28.617021
| 60
| 0.54023
| false
| 3.954545
| false
| false
| false
|
pestefo/viz-youtube
|
data/check_id_in_different_days.py
|
1
|
1547
|
import csv
from collections import defaultdict
file1 = '/Users/pestefo/Sync/projects/information-visualization-course/proyecto/data/data.csv'
file2 = '/Users/pestefo/Sync/projects/information-visualization-course/proyecto/data/302.csv'
columns1 = defaultdict(list)
columns2 = defaultdict(list)
with open(file1, 'rU') as f:
reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns1[k].append(v)
with open(file2, 'rU') as f:
reader = csv.DictReader(f)
for row in reader:
for (k,v) in row.items():
columns2[k].append(v)
# related = set(columns1['related_1'])
# related.update(columns1['related_2'])
# related.update(columns1['related_3'])
# related.update(columns1['related_4'])
# related.update(columns1['related_5'])
# related.update(columns1['related_6'])
# related.update(columns1['related_7'])
# related.update(columns1['related_8'])
# related.update(columns1['related_9'])
# related.update(columns1['related_10'])
# related.update(columns1['related_11'])
# related.update(columns1['related_12'])
# related.update(columns1['related_13'])
# related.update(columns1['related_14'])
# related.update(columns1['related_15'])
# related.update(columns1['related_16'])
# related.update(columns1['related_17'])
# related.update(columns1['related_18'])
# related.update(columns1['related_19'])
# related.update(columns1['related_20'])
related = set(columns1['id'])
interseccion = related.intersection(set(columns2['id']))
union = related.union(set(columns2['id']))
print(len(interseccion))
print(len(union))
|
mit
| 8,330,908,455,000,977,000
| 29.96
| 94
| 0.725921
| false
| 2.975
| false
| false
| false
|
Petr-By/qtpyvis
|
qtgui/panels/logging.py
|
1
|
14855
|
"""
File: logging.py
Author: Ulf Krumnack
Email: krumnack@uni-osnabrueck.de
Github: https://github.com/krumnack
"""
# standard imports
import logging
# Qt imports
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QLabel, QPushButton
from PyQt5.QtWidgets import QCheckBox, QRadioButton, QButtonGroup
from PyQt5.QtWidgets import QListWidget, QListWidgetItem, QComboBox
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QGroupBox
# toolbox imports
from dltb.util.logging import RecorderHandler
from toolbox import Toolbox
# GUI imports
from .panel import Panel
from ..utils import protect
from ..widgets.logging import QLogHandler, QExceptionView
# logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class LoggingPanel(Panel):
"""A panel containing elements to log messages.
Attributes
----------
_log_handler: QLogHandler
A widget to display log messages
"""
_levels = {
"Fatal": logging.FATAL,
"Error": logging.ERROR,
"Warning": logging.WARNING,
"Info": logging.INFO,
"Debug": logging.DEBUG
}
def __init__(self, toolbox: Toolbox=None, **kwargs):
"""Initialization of the LoggingPael.
Parameters
----------
parent : QWidget
The parent argument is sent to the QWidget constructor.
"""
super().__init__(**kwargs)
self._loggingRecorder = None
self._toolbox = None
self._initUI()
self.setToolbox(toolbox)
def _initUI(self):
"""Add the UI elements
* The ``QLogHandler`` showing the log messages
"""
#
# Controls
#
self._log_handler = QLogHandler()
self._log_handler._message_signal.connect(self._new_message)
self._total = QLabel()
self._updateLogViewButton = QPushButton("Update")
self._updateLogViewButton.clicked.connect(self._onUpdateLogView)
self._updateLogViewButton.setEnabled(self._loggingRecorder is not None)
self._clearLogViewButton = QPushButton("Clear")
self._clearLogViewButton.clicked.connect(self._onClearLogView)
self._modules = QListWidget()
self._checkProcess = QCheckBox("Process")
self._checkProcess.clicked.connect(self._updateFormatter)
self._checkThread = QCheckBox("Thread")
self._checkThread.clicked.connect(self._updateFormatter)
self._checkName = QCheckBox("Name")
self._checkName.clicked.connect(self._updateFormatter)
self._checkModule = QCheckBox("Module")
self._checkModule.clicked.connect(self._updateFormatter)
self._checkFile = QCheckBox("File")
self._checkFile.clicked.connect(self._updateFormatter)
self._checkLevel = QCheckBox("Level")
self._checkLevel.clicked.connect(self._updateFormatter)
self._radio = {}
self._levelButtonGroup = QButtonGroup()
for label in self._levels.keys():
self._radio[label] = QRadioButton(label)
self._radio[label].clicked.connect(self._onLoggerLevelClicked)
self._levelButtonGroup.addButton(self._radio[label])
self._checkLoggerEnabled = QCheckBox("enabled")
self._checkLoggerEnabled.clicked.connect(self._onLoggerEnabledClicked)
self._buttonLoggerClearLevel = QPushButton("Clear Level")
self._buttonLoggerClearLevel.clicked.connect(self._onClearLevel)
self._effectiveLevel = QLabel()
self._loggerList = QListWidget()
self._loggerList.setSortingEnabled(True)
self._loggerList.currentItemChanged.connect(self._onCurrentLoggerChanged)
self._loggerList_refresh = QPushButton("Refresh")
self._loggerList_refresh.clicked.connect(self._updateLoggerList)
self._rootLoggerLevel = QComboBox()
for name, level in self._levels.items():
self._rootLoggerLevel.addItem(name, level)
self._rootLoggerLevel.currentIndexChanged.connect(self._onRootLevelChanged)
self._exceptionPanel = QExceptionPanel()
self._updateLoggerList()
self._layoutComponents()
def _layoutComponents(self):
"""Layout the UI elements.
* The ``QLogHandler`` displaying the log messages
"""
layout = QVBoxLayout()
row = QHBoxLayout()
row.addWidget(self._log_handler)
row.addWidget(self._exceptionPanel)
layout.addLayout(row)
row = QHBoxLayout()
text = QHBoxLayout()
text.addWidget(QLabel("Messages: "))
text.addWidget(self._total)
row.addLayout(text)
row.addWidget(self._updateLogViewButton)
row.addWidget(self._clearLogViewButton)
row.addWidget(self._checkProcess)
row.addWidget(self._checkThread)
row.addWidget(self._checkName)
row.addWidget(self._checkModule)
row.addWidget(self._checkFile)
row.addWidget(self._checkLevel)
row.addStretch()
layout.addLayout(row)
row = QHBoxLayout()
column = QVBoxLayout()
column.addWidget(self._loggerList)
column.addWidget(self._loggerList_refresh)
row.addLayout(column)
column = QVBoxLayout()
box = QGroupBox("Root Logger")
boxLayout = QVBoxLayout()
boxLayout.addWidget(self._rootLoggerLevel)
box.setLayout(boxLayout)
column.addWidget(box)
box = QGroupBox("Logger Details")
boxLayout = QVBoxLayout()
boxLayout.addWidget(self._checkLoggerEnabled)
line = QHBoxLayout()
line.addWidget(QLabel("Effective Level: "))
line.addWidget(self._effectiveLevel)
boxLayout.addLayout(line)
for button in self._radio.values():
boxLayout.addWidget(button)
boxLayout.addWidget(self._buttonLoggerClearLevel)
box.setLayout(boxLayout)
column.addWidget(box)
column.addStretch()
row.addLayout(column)
row.addWidget(self._modules)
layout.addLayout(row)
self.setLayout(layout)
def setToolbox(self, toolbox: Toolbox=None) -> None:
self._exceptionPanel.setToolbox(toolbox)
def addLogger(self, logger):
"""Add a logger to this :py:class:LoggingPanel.
LogRecords emitted by that logger will be processed.
"""
logger.addHandler(self._log_handler)
if self._loggingRecorder is not None:
logger.addHandler(self._loggingRecorder)
def removeLogger(self, logger):
"""Remove a logger from this :py:class:LoggingPanel.
LogRecords emitted by that logger will no longer be processed.
"""
logger.removeHandler(self._log_handler)
if self._loggingRecorder is not None:
logger.removeHandler(self._loggingRecorder)
def setLoggingRecorder(self, recorder: RecorderHandler) -> None:
"""Set a logging recorder for this :py:class:LoggingPanel.
Having a logging recorder makes it possible to replay the log messages
recorded by that recorder.
"""
self._loggingRecorder = recorder
self._onUpdateLogView()
self._updateLogViewButton.setEnabled(recorder is not None)
def _new_message(self, message):
total = str(len(self._log_handler))
if self._loggingRecorder is not None:
total += "/" + str(len(self._loggingRecorder))
self._total.setText(total)
def _updateFormatter(self):
format = ""
if self._checkProcess.isChecked():
format += "[%(processName)s] "
if self._checkThread.isChecked():
format += "[%(threadName)s] "
if self._checkName.isChecked():
format += "(%(name)s) "
if self._checkModule.isChecked():
format += "%(module)s "
if self._checkFile.isChecked():
format += "%(filename)s:%(lineno)d: "
if self._checkLevel.isChecked():
format += "%(levelname)s: "
format += "%(message)s"
formatter = logging.Formatter(fmt=format, datefmt="%(asctime)s")
self._log_handler.setFormatter(formatter)
def _onClearLogView(self):
"""Update the log view.
"""
self._log_handler.clear()
def _onUpdateLogView(self):
"""Update the log view.
"""
if self._loggingRecorder is not None:
self._loggingRecorder.replay(self._log_handler)
def _decorateLoggerItem(self, item: QListWidgetItem,
logger: logging.Logger) -> None:
"""Decorate an entry in the logger list reflecting the properties
of the logger.
"""
item.setForeground(self._colorForLogLevel(logger.getEffectiveLevel()))
font = item.font()
font.setBold(bool(logger.level))
item.setFont(font)
item.setBackground(Qt.lightGray if logger.disabled else Qt.white)
def _updateLoggerList(self):
self._loggerList.clear()
self._updateLogger(None)
# Iterate over a snapshot: loggerDict may grow while we iterate,
# which used to raise "RuntimeError: dictionary changed size
# during iteration".
for name, logger in list(logging.Logger.manager.loggerDict.items()):
if not isinstance(logger, logging.Logger):
continue
level = logger.getEffectiveLevel()
item = QListWidgetItem(name)
self._decorateLoggerItem(item, logger)
self._loggerList.addItem(item)
index = self._rootLoggerLevel.findData(logging.Logger.root.level)
self._rootLoggerLevel.setCurrentIndex(index)
def _onCurrentLoggerChanged(self, item: QListWidgetItem,
previous: QListWidgetItem) -> None:
"""A logger was selected in the logger list.
"""
logger = (None if item is None else
logging.Logger.manager.loggerDict[item.text()])
self._updateLogger(logger)
def _onRootLevelChanged(self, index: int) -> None:
logging.Logger.root.setLevel(self._rootLoggerLevel.currentData())
self._updateLoggerList()
def _updateLogger(self, logger: logging.Logger):
"""Update the logger group to reflect the currently selected
logger. If there is no current logger (logger is None), then
the logger group is cleared and disabled.
"""
if logger is None or not logger.level:
checked = self._levelButtonGroup.checkedButton()
if checked is not None:
self._levelButtonGroup.setExclusive(False)
checked.setChecked(False)
self._levelButtonGroup.setExclusive(True)
self._checkLoggerEnabled.setCheckable(logger is not None)
for button in self._levelButtonGroup.buttons():
button.setCheckable(logger is not None)
if logger is None:
self._effectiveLevel.setText("")
else:
self._checkLoggerEnabled.setChecked(not logger.disabled)
self._effectiveLevel.setText(str(logger.getEffectiveLevel()))
if logger.level:
button = self._buttonForForLogLevel(logger.level)
if button is not None:
button.setChecked(True)
def _onLoggerEnabledClicked(self, checked: bool) -> None:
"""A logger enable/disable button was pressed.
"""
for item in self._loggerList.selectedItems():
logger = logging.Logger.manager.loggerDict[item.text()]
logger.disabled = not checked
self._decorateLoggerItem(item, logger)
def _onLoggerLevelClicked(self, checked: bool) -> None:
"""A logger level radio button was pressed.
"""
checked = self._levelButtonGroup.checkedButton()
level = 0 if checked is None else self._levels[checked.text()]
for item in self._loggerList.selectedItems():
logger = logging.Logger.manager.loggerDict[item.text()]
logger.setLevel(level)
self._decorateLoggerItem(item, logger)
def _onClearLevel(self) -> None:
"""Clear the individual log level of the current logger.
"""
logger = None
for item in self._loggerList.selectedItems():
logger = logging.Logger.manager.loggerDict[item.text()]
logger.setLevel(0)
self._decorateLoggerItem(item, logger)
self._updateLogger(logger)
def _buttonForForLogLevel(self, level):
for label, _level in self._levels.items():
if level == _level:
return self._radio[label]
return None
def _colorForLogLevel(self, level):
if level <= logging.DEBUG: return Qt.blue
if level <= logging.INFO: return Qt.green
if level <= logging.WARNING: return Qt.darkYellow
if level <= logging.ERROR: return Qt.red
if level <= logging.FATAL: return Qt.magenta
return Qt.black
from PyQt5.QtWidgets import QPlainTextEdit, QListWidget, QPushButton
class QExceptionPanel(QWidget):
"""
"""
def __init__(self, toolbox: Toolbox=None, **kwargs):
super().__init__(**kwargs)
self._toolbox = None
self._initUI()
self._layoutComponents()
def _initUI(self):
self._exceptionList = QListWidget()
self._exceptionList.currentItemChanged.\
connect(self._onCurrentExceptionChanged)
self._exceptionView = QExceptionView()
self._exceptionButton = QPushButton("Raise Test Exception")
self._exceptionButton.clicked.connect(self._onButtonClicked)
def _layoutComponents(self):
row = QHBoxLayout()
column = QVBoxLayout()
column.addWidget(self._exceptionList)
column.addWidget(self._exceptionButton)
row.addLayout(column)
row.addWidget(self._exceptionView)
self.setLayout(row)
def setToolbox(self, toolbox: Toolbox=None) -> None:
if self._toolbox is not None:
self._toolbox.remove_exception_handler(self.handleException)
self._toolbox = toolbox
if self._toolbox is not None:
self._toolbox.add_exception_handler(self.handleException)
def handleException(self, exception: BaseException) -> None:
self._exceptionView.setException(exception)
@protect
def _onCurrentExceptionChanged(self, item: QListWidgetItem,
previous: QListWidgetItem) -> None:
"""An exception was selected in the exception list.
"""
print(f"FIXME[todo]: exception changed: {item}, {previous}")
@protect
def _onButtonClicked(self, checked: bool) -> None:
"""The raise exceptoin button was pressed.
"""
raise RuntimeError("Just a test error.")
|
mit
| 4,858,607,966,636,792,000
| 34.538278
| 83
| 0.629754
| false
| 4.303302
| false
| false
| false
|
adobe-research/spark-gpu
|
data/generate_kmeans.py
|
1
|
1745
|
#!/home/ec2-user/anaconda/bin/python
###########################################################################
##
## Copyright (c) 2015 Adobe Systems Incorporated. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###########################################################################
import sys, getopt
import numpy as np
def kmeansGenerate(k, filename):
data = ""
for i in range(k):
floatlist = list(np.random.uniform(low=0.1, high=10, size=(3)))
floatlist = " ".join(map(str, floatlist)) + '\n'
data = data + floatlist
target = open(filename, 'w')
target.write(str(data))
target.close()
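# kmeansGenerate writes k lines of three space-separated floats drawn uniformly
# from (0.1, 10), e.g. "3.207 0.538 7.964" (the values shown are illustrative).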
def bayesGenerate(k, filename):
data = ""
for i in range(k):
nplist = list(np.random.uniform(low=0.1, high=10, size=(4)))
intlist = [int(x) for x in nplist]
intlist = " ".join(map(str, intlist)) + '\n'
data = data + intlist
target = open(filename, 'w')
target.write(str(data))
target.close()
def main():
if len(sys.argv) != 3:
sys.stderr.write("Usage: generate <length> <file>\n")
exit(-1)
kmeansGenerate(int(sys.argv[1]),sys.argv[2])
#bayesGenerate(int(sys.argv[1]),sys.argv[2])
if __name__ == "__main__":
main()
|
apache-2.0
| 4,764,295,083,545,138,000
| 30.727273
| 75
| 0.597708
| false
| 3.546748
| false
| false
| false
|