text stringlengths 4 1.02M | meta dict |
|---|---|
import SocketServer
import socket
import threading
import numpy as np
import cv2
import pygame
from pygame.locals import *
import socket
import time
import os
# SocketServer.ThreadingTCPServer.allow_reuse_address = True
# Network endpoints: the Raspberry Pi runs the motor-control server; this
# machine acts as the sink for the camera stream.
RASP_IP = '192.168.43.70'
RASP_SERV_PORT = 7879
COMP_IP = '192.168.43.210'
COMP_SERV_PORT = 8002

# Drive-command wire protocol.  Every command is exactly 5 characters ending
# in ':' so the receiver can split commands out of the TCP byte stream.
command = {
    # single commands
    'rs': "rst_:",    # reset / stop
    'f': "fwd_:",     # forward
    'rev': "rev_:",   # reverse
    'r': "rht_:",     # right
    'l': "lft_:",     # left
    # combination commands
    'f_r': "f_rt:",   # forward + right
    'f_l': "f_lf:",   # forward + left
    'rev_r': "rv_r:", # reverse + right
    'rev_l': "rv_l:", # reverse + left
    # 5 character in each string
}
"""
9x9 output
k = [
[1, 0, 0, 0, 0, 0, 0, 0, 0], # left
[0, 1, 0, 0, 0, 0, 0, 0, 0], # right
[0, 0, 1, 0, 0, 0, 0, 0, 0], # forward
[0, 0, 0, 1, 0, 0, 0, 0, 0], # reverse
[0, 0, 0, 0, 1, 0, 0, 0, 0], # forward_left
[0, 0, 0, 0, 0, 1, 0, 0, 0], # forward_right
[0, 0, 0, 0, 0, 0, 1, 0, 0], # reverse_left
[0, 0, 0, 0, 0, 0, 0, 1, 0], # reverse_right
[0, 0, 0, 0, 0, 0, 0, 0, 1], # stop ~ reset
]
"""
class CollectTrainingData(object):
def __init__(self):
# creating server for camera
self.server_socket = socket.socket()
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind((COMP_IP, COMP_SERV_PORT))
self.server_socket.listen(0)
# accept single connection
self.connection = self.server_socket.accept()[0].makefile('rb')
# create a socket and connect to motor controller
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((RASP_IP, RASP_SERV_PORT))
self.send_motor = True
self.k = np.zeros((4, 4), float)
for i in range(4):
self.k[i, i] = 1
self.temp_label = np.zeros((1, 4), 'float')
pygame.init()
self.collect_data()
def save_image(self,image,filename,label=4):
if label==2:
# save streamed images
cv2.imwrite('training_images/forward/frame_{}.jpg'.format(filename), image)
elif label ==0:
cv2.imwrite('training_images/left/frame_{}.jpg'.format(filename), image)
elif label ==1:
cv2.imwrite('training_images/right/frame_{}.jpg'.format(filename), image)
else :
cv2.imwrite('training_images/unclassified/frame_{}.jpg'.format(filename), image)
def collect_data(self):
saved_frame = 0
total_frame = 0
# collect_images for training
print 'Start collecting images'
e1 = cv2.getTickCount()
image_array = np.zeros((1, 115200))
label_array = np.zeros((1, 4), 'float')
image_to_list=[]
# stream video frames one by one
try:
stream_bytes = ''
frame = 1
while self.send_motor:
# print("reading data")
stream_bytes += self.connection.read(1024)
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
jpg = stream_bytes[first:last + 2]
stream_bytes = stream_bytes[last + 2:]
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_ANYCOLOR)
# select lower half of the image
roi = image[120:240, :]
# save streamed images
#cv2.imwrite('training_images/frame{:>05}.jpg'.format(frame), image)
# cv2.imshow('roi_image',roi)
cv2.imshow('image', image)
# reshape roi image in one array
temp_array = roi.reshape(1, 115200).astype(np.float32)
frame += 1
total_frame += 1
# get input from human driver
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
key_input = pygame.key.get_pressed()
# complex orders
if key_input[pygame.K_UP] and key_input[pygame.K_RIGHT]:
print("Forward Right")
simage_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[1]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,1])
self.save_image(image,image_name,1)
#self.ser.write(chr(6))
self.client_socket.send(command['f_r'])
elif key_input[pygame.K_UP] and key_input[pygame.K_LEFT]:
print("Forward Left")
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[0]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,0])
#self.save_image(image,image_name,0)
self.client_socket.send(command['f_l'])
elif key_input[pygame.K_DOWN] and key_input[pygame.K_RIGHT]:
print("Reverse Right")
self.client_socket.send(command['rev_r'])
elif key_input[pygame.K_DOWN] and key_input[pygame.K_LEFT]:
print("Reverse Left")
self.client_socket.send(command['rev_l'])
# simple orders
elif key_input[pygame.K_UP]:
print("Forward")
saved_frame += 1
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[2]))
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,2])
#self.save_image(image,image_name,2)
self.client_socket.send(command['f'])
elif key_input[pygame.K_DOWN]:
print("Reverse")
saved_frame += 1
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[3]))
self.client_socket.send(command['rev'])
elif key_input[pygame.K_RIGHT]:
print("Right")
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[1]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,1])
#self.save_image(image,image_name,1)
self.client_socket.send(command['r'])
elif key_input[pygame.K_LEFT]:
print("Left")
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[0]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,0])
#self.save_image(image,image_name,0)
self.client_socket.send(command['l'])
elif key_input[pygame.K_x] or key_input[pygame.K_q]:
print 'exit'
self.send_motor = False
self.client_socket.send(command['rs'])
break
elif event.type == pygame.KEYUP:
self.client_socket.send(command['rs'])
#print(stream_bytes)
#self.client_socket.send(stream_bytes)
# save training images and labels
train = image_array[1:, :]
train_labels = label_array[1:, :]
# save training data as a numpy file
file_name = str(int(time.time()))
directory = "training_data"
if not os.path.exists(directory):
os.makedirs(directory)
try:
np.savez(directory + '/' + file_name + '.npz', train=train, train_labels=train_labels)
except IOError as e:
print(e)
for i in image_to_list:
self.save_image(i[0],i[1],i[2])
e2 = cv2.getTickCount()
# calculate streaming duration
time0 = (e2 - e1) / cv2.getTickFrequency()
print 'Streaming duration:', time0
print(train.shape)
print(train_labels.shape)
print 'Total frame:', total_frame
print 'Saved frame:', saved_frame
print 'Dropped frame', total_frame - saved_frame
finally:
self.connection.close()
self.server_socket.close()
self.client_socket.close()
if __name__ == '__main__':
    # The constructor blocks: it waits for the Pi's camera connection and
    # then runs the collection loop until the driver quits (x / q).
    CollectTrainingData()
| {
"content_hash": "18d379df66cddf41208540769ca2c106",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 102,
"avg_line_length": 39.584,
"alnum_prop": 0.4597817299919159,
"repo_name": "IsmoilovMuhriddin/allgo",
"id": "869eef11054ac19d93237d0ec740d1db66cdad0c",
"size": "9896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comp/collect_training_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1062"
},
{
"name": "CMake",
"bytes": "238"
},
{
"name": "Python",
"bytes": "231301"
}
],
"symlink_target": ""
} |
""" Environment
"""
import gym
import numpy as np
import DQNConfig as config
import random
class DQNEnvironment:
    """Wrapper around a Gym Atari environment for DQN training.

    Adds frame skipping, an optional life-loss penalty, reward clipping and
    per-episode reward bookkeeping, all governed by ``DQNConfig``.
    """

    def __init__(self, environment_name, display=False, frame_skipping=True):
        """Create the underlying Gym environment.

        :param environment_name: Gym environment id passed to ``gym.make``.
        :param display: render every frame when True.
        :param frame_skipping: when True, repeat each action for
            ``config.frame_skip_interval`` frames.
        """
        self._env = gym.make(environment_name)
        self._display = display  # BUG FIX: attribute was misspelled "_dispaly"
        self._done = True
        self._total_episode_reward = 0
        self._frame_skipping = frame_skipping

    def step(self, action):
        """Apply ``action``, repeating it over the skip interval.

        The agent only sees and selects an action on every k-th frame; the
        last action is repeated in the skipped frames.

        :returns: ``(observation, accumulated_reward, done)``.
        """
        start_lives = self._env.unwrapped.ale.lives()
        accumulated_reward = 0
        skip_interval = config.frame_skip_interval if self._frame_skipping else 1
        for _ in range(skip_interval):
            observation, reward, self._done, info = self._env.step(action)
            accumulated_reward += reward
            if self._display:
                self._env.render()
            if self._done:
                # Episode over: stop repeating the action.
                break
        # Penalize losing a life even when the episode itself continues.
        end_lives = self._env.unwrapped.ale.lives()
        if config.life_drop_penalty and end_lives < start_lives:
            accumulated_reward -= 1
        if config.reward_clipping:
            accumulated_reward = np.clip(accumulated_reward, -1, 1)
        self._total_episode_reward += accumulated_reward
        return observation, accumulated_reward, self._done

    def reset(self):
        """Reset the environment and the episode-reward accumulator."""
        observation = self._env.reset()
        self._done = False
        self._total_episode_reward = 0
        return observation

    def reset_random(self):
        """Reset, then take a random number of random warm-up actions.

        If the environment terminates during the warm-up it is reset again.
        """
        observation = self.reset()
        # BUG FIX: randrange(0, 0) raises ValueError when
        # config.random_start <= 1; clamp the upper bound to at least 1
        # (which yields zero warm-up steps).
        for _ in range(random.randrange(0, max(1, config.random_start - 1))):
            observation, _, done, _ = self._env.step(
                action=random.randrange(0, self.get_num_actions()))
            if done:
                observation = self.reset()
        return observation

    def render(self):
        """Render the current frame."""
        self._env.render()

    def get_num_actions(self):
        """Return the number of discrete actions in the action space."""
        return self._env.action_space.n

    def episode_done(self):
        """Return True once the last step ended the episode."""
        return self._done

    def get_total_reward(self):
        """Return the accumulated (possibly clipped) reward this episode."""
        return self._total_episode_reward
| {
"content_hash": "e14abfb0f545074fdb782ea73b79f28c",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 116,
"avg_line_length": 31.485294117647058,
"alnum_prop": 0.5987856141989725,
"repo_name": "YuMao1993/DRL",
"id": "f17092526b24c875d610435a0d089d656f7bb0a9",
"size": "2141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DQN/DQNEnv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "412836"
},
{
"name": "Shell",
"bytes": "1976"
}
],
"symlink_target": ""
} |
"""Package for ox_herd plugins.
""" | {
"content_hash": "99b0254b6eba28f96c344900e7117c55",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 31,
"avg_line_length": 17.5,
"alnum_prop": 0.6571428571428571,
"repo_name": "aocks/ox_herd",
"id": "e6370b90c91975fab237149ac1ccae3500c43070",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ox_herd/core/plugins/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30097"
},
{
"name": "Dockerfile",
"bytes": "3198"
},
{
"name": "HTML",
"bytes": "20012"
},
{
"name": "Makefile",
"bytes": "314"
},
{
"name": "Python",
"bytes": "159512"
},
{
"name": "Shell",
"bytes": "718"
}
],
"symlink_target": ""
} |
"""
Handle a restricted form of generators and inlining.
"""
from __future__ import print_function, division, absolute_import
from collections import namedtuple
from functools import partial
from flypy.errors import error
from flypy.runtime import builtins
from flypy.compiler.optimizations import inlining, reg2mem
from pykit.ir import Builder, copying
from pykit.analysis import loop_detection, callgraph
from pykit.utils import listify
#===------------------------------------------------------------------===
# Driver
#===------------------------------------------------------------------===
def generator_fusion(func, env):
    """
    Fuse generators into their consumer loops across the call graph,
    iterating until no more consumers are found (fixpoint).
    """
    envs = env['flypy.state.envs']
    dependences = callgraph.callgraph(func).node
    changed = True
    while changed:
        changed = False
        for callee in dependences:
            callee_env = envs[callee]
            consumers = identify_consumers(callee, callee_env)
            fuse_generators(callee, callee_env, consumers)
            # Any fusion performed may expose new opportunities elsewhere.
            changed = changed or bool(consumers)
#===------------------------------------------------------------------===
# Consumer Identification
#===------------------------------------------------------------------===
Consumer = namedtuple('Consumer', ['generator', 'iter', 'next', 'loop'])


@listify
def identify_consumers(func, env):
    """
    Identify consumers of generators: the natural loops that iterate over a
    generator through the ``next(iter(my_generator()))`` pattern.
    """
    gen_calls = find_generators(func, env)
    if not gen_calls:
        # No generators at all -- nothing to analyse.
        return

    loop_forest = loop_detection.find_natural_loops(func)
    flat_loops = loop_detection.flatloops(loop_forest)
    loops_by_head = {loop.head: loop for loop in flat_loops}
    expect_call = partial(expect_single_call, func, env)

    for gen_call in gen_calls:
        # Check for a nesting of next(iter(<generator call>)) whose next()
        # sits in a loop header.
        iter_call = expect_call(gen_call, builtins.iter)
        next_call = expect_call(iter_call, builtins.next)
        if iter_call and next_call and next_call.block in loops_by_head:
            yield Consumer(gen_call, iter_call, next_call,
                           loops_by_head[next_call.block])
@listify
def find_generators(func, env):
    """
    Yield every call op in `func` whose callee is a generator function.
    """
    envs = env['flypy.state.envs']
    for op in func.ops:
        if op.opcode != 'call':
            continue
        callee, _ = op.args
        if callee in envs and envs[callee]['flypy.state.generator'] == 1:
            yield op
# -- helpers -- #

def expect_single_call(func, env, defining_op, flypy_func):
    """
    If `defining_op` is a call whose single use is itself a call to the
    wrapper of `flypy_func`, return that use; otherwise return None.
    """
    if not defining_op or defining_op.opcode != 'call':
        return None
    uses = func.uses[defining_op]
    if len(uses) != 1:
        return None
    [use] = uses
    if use.opcode != 'call':
        return None
    callee, _ = use.args
    callee_env = env['flypy.state.envs'][callee]
    if callee_env['flypy.state.function_wrapper'] == flypy_func:
        return use
    return None
#===------------------------------------------------------------------===
# Generator Fusion
#===------------------------------------------------------------------===
def fuse_generators(func, env, consumers):
    """
    Rewrite straightforward uses of generators: each generator that is
    allocated and consumed by a single consumer loop is inlined into it.
    """
    envs = env['flypy.state.envs']
    for consumer in consumers:
        gen_func = consumer.generator.args[0]
        # 1) Detach the consumer loop's blocks from the CFG.
        body_block = detach_loop(func, consumer)
        # 2) Move the generator call into the freshly detached block.
        move_generator(func, consumer, body_block)
        # 3) Drop the iteration scaffolding (iter call, back-edge).
        clean_loop_body(func, consumer)
        # 4) Inline the generator function at its call site.
        valuemap = inlining.inline_callee(func, consumer.generator, env,
                                          envs[gen_func])
        # 5) Replace each 'yield' with a copy of the loop body.
        consume_yields(func, consumer, gen_func, valuemap)
def rewrite_general_generators(func, env):
    """
    Report an error for every generator use that survived fusion; general
    (non-fusable) generator usage is not supported.
    """
    for remaining in find_generators(func, env):
        error(env, 'lower', "Generator %s could not be fused" % (remaining,))
#=== rewrites ===#
def detach_loop(func, consumer):
    """Remove the consumer loop's blocks from `func`, then split the block
    holding the iter() call and return the fresh (empty) successor block.

    NOTE(review): mutates `func` in place and assumes the iter() block ends
    with an unconditional jump to the loop header (asserted below).
    """
    loop, iter = consumer.loop, consumer.iter
    # Delete every block belonging to the loop; use/def info is stale after.
    for block in loop.blocks:
        func.del_block(block)
    func.reset_uses()
    b = Builder(func)
    # The iter() block must currently fall through into the loop header.
    jump = iter.block.terminator
    assert jump.opcode == 'jump' and jump.args[0] == loop.head
    jump.delete()
    # Split right after iter(); the new block will host the generator call.
    b.position_at_end(iter.block)
    _, newblock = b.splitblock(terminate=True)
    return newblock
def move_generator(func, consumer, empty_body):
    """Unlink the generator call op and re-emit it inside `empty_body`,
    then terminate that block with a jump to the loop's exit block.
    """
    gen = consumer.generator
    gen.unlink()
    b = Builder(func)
    b.position_at_end(empty_body)
    b.emit(gen)
    # Wire the block to wherever the original loop used to exit.
    with b.at_end(empty_body):
        loop_exit = determine_loop_exit(consumer.loop)
        b.jump(loop_exit)
def determine_loop_exit(loop):
    """Return the loop's exit block, read off the single exc_setup op that
    must be present in the loop header."""
    setup_ops = [op for op in loop.head if op.opcode == 'exc_setup']
    [exc_setup] = setup_ops
    [loop_exit] = exc_setup.args[0]
    return loop_exit
def clean_loop_body(func, consumer):
    """Delete the iter() call and the loop's back-edge terminator, leaving
    the loop body free-standing."""
    loop = consumer.loop
    # The tail must end in an unconditional jump back to the header.
    back_edge = loop.tail.terminator
    assert back_edge.opcode == 'jump', "expected a back-edge"
    assert back_edge.args[0] == loop.head, "expected a back-edge"
    consumer.iter.delete()
    back_edge.delete()
def consume_yields(func, consumer, generator_func, valuemap):
    """Replace each 'yield' in the inlined generator body with the consumer
    loop's body, splicing the yielded value in for the old next() call.

    NOTE(review): `copier` is currently the identity function, so the loop
    blocks are reused rather than deep-copied -- confirm this is only
    correct for generators containing a single yield.
    """
    b = Builder(func)
    copier = lambda x : x
    loop = consumer.loop
    inlined_values = set(valuemap.values())
    for block in func.blocks:
        if block in inlined_values:
            for op in block.ops:
                if op.opcode == 'yield':
                    # -- Replace 'yield' by the loop body -- #
                    b.position_after(op)
                    _, resume = b.splitblock()
                    # Copy blocks (identity copy -- see NOTE above)
                    blocks = [copier(block) for block in loop.blocks]
                    # Insert blocks after the block containing the yield
                    prev = op.block
                    for block in blocks:
                        func.add_block(block, after=prev)
                        prev = block
                    # Fix wiring: yield block -> loop body -> resume point
                    b.jump(blocks[0])
                    b.position_at_end(blocks[-1])
                    b.jump(resume)
                    # We just introduced a bunch of copied blocks
                    func.reset_uses()
                    # Update phis with new predecessor
                    # NOTE(review): the call below appears twice verbatim;
                    # the second call looks accidental (should be a no-op
                    # after the first) -- confirm and remove.
                    b.replace_predecessor(loop.tail, op.block, loop.head)
                    b.replace_predecessor(loop.tail, op.block, loop.head)
                    # Replace next() by value produced by yield
                    value = op.args[0]
                    consumer.next.replace_uses(value)
                    op.delete()
    # We don't need these anymore
    consumer.next.delete()
"content_hash": "e6f2121a4bfdb1ea41fcc58b9050fc52",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 76,
"avg_line_length": 31.055299539170505,
"alnum_prop": 0.5583914527377949,
"repo_name": "flypy/flypy",
"id": "6f59fadb57b444fe55bb68e5c7a22873cb75840c",
"size": "6764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flypy/compiler/lower/generators.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "540626"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
} |
from a10sdk.common.A10BaseClass import A10BaseClass
class EntryList(A10BaseClass):
    """Child object for the "entry-list" array; not independently CRUD-able
    (use the parent object instead).

    Schema string fields ``active``, ``name`` and ``modified`` each hold
    1-63 characters.  ``DeviceProxy`` handles REST sessions -- refer to
    ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "entry-list"
        self.DeviceProxy = ""
        # Schema string fields default to the empty string.
        for field in ("active", "name", "modified"):
            setattr(self, field, "")
        # Caller-supplied keyword arguments override the defaults.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class Oper(A10BaseClass):
    """Child "oper" container; not independently CRUD-able (use the parent
    object instead).

    ``entry_list`` holds EntryList-shaped dicts per the device schema.
    ``DeviceProxy`` handles REST sessions -- refer to
    ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        # Fixed defaults, applied in declaration order.
        defaults = {
            "ERROR_MSG": "",
            "b_key": "oper",
            "DeviceProxy": "",
            "entry_list": [],
        }
        for attr_name, attr_value in defaults.items():
            setattr(self, attr_name, attr_value)
        # Caller-supplied keyword arguments override the defaults.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class ActiveBindingTableFile(A10BaseClass):
    """Operational status for the object active-binding-table-file.

    This is the "PARENT" class for this module; it supports CRUD operations
    and inherits from ``common/A10BaseClass``.  ``DeviceProxy`` handles REST
    sessions -- refer to ``common/device_proxy.py``.

    REST URL::

        https://<Hostname|Ip address>//axapi/v3/cgnv6/lw-4o6/active-binding-table-file/oper
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "active-binding-table-file"
        self.a10_url = "/axapi/v3/cgnv6/lw-4o6/active-binding-table-file/oper"
        self.DeviceProxy = ""
        # Populated by the API with the operational data for this object.
        self.oper = {}
        # Caller-supplied keyword arguments override the defaults.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
| {
"content_hash": "d915b053a83c43ea6b17ff706389fb01",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 401,
"avg_line_length": 32.451219512195124,
"alnum_prop": 0.6076662908680946,
"repo_name": "amwelch/a10sdk-python",
"id": "0ca35880675fc9503a80a172571c4b30a16598dc",
"size": "2661",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/cgnv6/cgnv6_lw_4o6_active_binding_table_file_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
} |
import collections
from test_framework import generic_test
from test_framework.test_failure import PropertyName
Rect = collections.namedtuple('Rect', ('x', 'y', 'width', 'height'))


def intersect_rectangle(r1: Rect, r2: Rect) -> Rect:
    """Return the intersection of two axis-aligned rectangles.

    Each Rect is (x, y, width, height) with (x, y) the lower-left corner.
    Rectangles that merely touch along an edge or corner count as
    intersecting (the result then has zero width and/or height).  When the
    rectangles do not intersect at all, the EPI sentinel Rect(0, 0, -1, -1)
    is returned.
    """
    def overlaps(a: Rect, b: Rect) -> bool:
        # Overlap on both axes, boundary contact included.
        return (a.x <= b.x + b.width and b.x <= a.x + a.width
                and a.y <= b.y + b.height and b.y <= a.y + a.height)

    if not overlaps(r1, r2):
        return Rect(0, 0, -1, -1)  # sentinel: no intersection
    left = max(r1.x, r2.x)
    bottom = max(r1.y, r2.y)
    right = min(r1.x + r1.width, r2.x + r2.width)
    top = min(r1.y + r1.height, r2.y + r2.height)
    return Rect(left, bottom, right - left, top - bottom)
def intersect_rectangle_wrapper(r1, r2):
    """Adapter for the judge: convert plain sequences to Rects first."""
    rect1, rect2 = Rect(*r1), Rect(*r2)
    return intersect_rectangle(rect1, rect2)
def res_printer(prop, value):
    """Render expected/result rectangles as plain 4-element lists; pass
    every other property through unchanged."""
    if prop not in (PropertyName.EXPECTED, PropertyName.RESULT):
        return value
    # Falsy values (e.g. None) are shown as None.
    return [value[0], value[1], value[2], value[3]] if value else None
if __name__ == '__main__':
    # EPI judge harness: run the cases in rectangle_intersection.tsv against
    # intersect_rectangle_wrapper, formatting output via res_printer, and
    # exit with the judge's status code.
    exit(
        generic_test.generic_test_main('rectangle_intersection.py',
                                       'rectangle_intersection.tsv',
                                       intersect_rectangle_wrapper,
                                       res_printer=res_printer))
| {
"content_hash": "647a861f199627579fe78e6e50da408a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 28,
"alnum_prop": 0.5800865800865801,
"repo_name": "shobhitmishra/CodingProblems",
"id": "d8898b474302e3627408ea65ae7aaae159d7c4e3",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "epi_judge_python/rectangle_intersection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "854"
},
{
"name": "Makefile",
"bytes": "31844"
},
{
"name": "Python",
"bytes": "437556"
}
],
"symlink_target": ""
} |
from builtins import object
class CourgetteConfiguration(object):
    """Bundle of settings describing a single courgette run; values are
    stored verbatim as same-named instance attributes."""

    def __init__(self, specification, parent_resource_name, parent_id, default_values):
        """Store the four configuration values on the instance."""
        attrs = {
            "specification": specification,
            "parent_resource_name": parent_resource_name,
            "parent_id": parent_id,
            "default_values": default_values,
        }
        for attr_name, attr_value in attrs.items():
            setattr(self, attr_name, attr_value)
| {
"content_hash": "93230fd3f4cae0d50f4a7f276c914955",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 27.846153846153847,
"alnum_prop": 0.6629834254143646,
"repo_name": "little-dude/monolithe",
"id": "70333785dfa8cd03931a583d1dcde0016f1dbd1b",
"size": "1960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monolithe/courgette/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16165"
},
{
"name": "HTML",
"bytes": "983593"
},
{
"name": "JavaScript",
"bytes": "93413"
},
{
"name": "Python",
"bytes": "378390"
},
{
"name": "Smarty",
"bytes": "154277"
}
],
"symlink_target": ""
} |
"""module to call main and exit with its return code
"""
import sys
from . import main
sys.exit(
main.main(
sys.argv[1:]
)
)
| {
"content_hash": "f2e2658ea1fff8d4a76f86c8468334b3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 11.153846153846153,
"alnum_prop": 0.593103448275862,
"repo_name": "MrWinstead/businesscard_puzzle",
"id": "35e72b5dd5fb3f3b1b1519849c46171418d7d9ac",
"size": "168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "businesscard_puzzle/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6698"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
} |
"""Matter cluster capability unit test for cluster_base module."""
from gazoo_device.capabilities.matter_clusters.interfaces import cluster_base
from gazoo_device.tests.unit_tests.utils import fake_device_test_case
_FAKE_DEVICE_NAME = "fake-device-name"
_FAKE_ENDPOINT_ID = 1
_FAKE_ATTRIBUTE = "fake_attribute"
class ClusterBaseTest(fake_device_test_case.FakeDeviceTestCase):
  """Unit test for ClusterBase."""

  def setUp(self):
    super().setUp()
    init_kwargs = {
        "device_name": _FAKE_DEVICE_NAME,
        "endpoint_id": _FAKE_ENDPOINT_ID,
        "read": None,
        "write": None,
    }
    self.uut = cluster_base.ClusterBase(**init_kwargs)

  def test_setattr_method_on_failure(self):
    """Verifies the overridden __setattr__ on failure.

    The on success scenarios are covered by the other cluster unit tests.
    """
    expected_regex = f"Invalid attribute '{_FAKE_ATTRIBUTE}' to set"
    with self.assertRaisesRegex(AttributeError, expected_regex):
      self.uut.fake_attribute = 0
if __name__ == "__main__":
fake_device_test_case.main()
| {
"content_hash": "810bb818412111bdbccabd22a7c56dbf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 31.125,
"alnum_prop": 0.6967871485943775,
"repo_name": "google/gazoo-device",
"id": "afe021d5609f94da0b3a1e555438512729ff3016",
"size": "1572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gazoo_device/tests/unit_tests/capability_tests/matter_cluster_tests/cluster_base_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3014778"
},
{
"name": "Shell",
"bytes": "19088"
}
],
"symlink_target": ""
} |
import datetime
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.handlers.wsgi import WSGIRequest
from django.test import Client
from django.test import TestCase
from registration import forms
from registration import signals
from registration.admin import RegistrationAdmin
from registration.backends import get_backend
from registration.backends.default import DefaultBackend
from registration.backends.simple import SimpleBackend
from registration.models import RegistrationProfile
class _MockRequestClient(Client):
    """
    A ``django.test.Client`` subclass which can return mock
    ``HttpRequest`` objects.
    """
    def request(self, **request):
        """
        Rather than issuing a request and returning the response, this
        simply constructs an ``HttpRequest`` object and returns it.
        """
        # Minimal WSGI environ describing "GET / HTTP/1.1" from localhost;
        # mirrors what the test client would build before dispatching.
        environ = {
            'HTTP_COOKIE': self.cookies,
            'PATH_INFO': '/',
            'QUERY_STRING': '',
            'REMOTE_ADDR': '127.0.0.1',
            'REQUEST_METHOD': 'GET',
            'SCRIPT_NAME': '',
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': '80',
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'wsgi.version': (1,0),
            'wsgi.url_scheme': 'http',
            'wsgi.errors': self.errors,
            'wsgi.multiprocess': True,
            'wsgi.multithread': False,
            'wsgi.run_once': False,
        }
        # Caller-supplied kwargs override the defaults above.
        environ.update(self.defaults)
        environ.update(request)
        request = WSGIRequest(environ)
        # We have to manually add a session since we'll be bypassing
        # the middleware chain.
        session_middleware = SessionMiddleware()
        session_middleware.process_request(request)
        return request
def _mock_request():
    """
    Construct and return a mock ``HttpRequest`` object; this is used
    in testing backend methods which expect an ``HttpRequest`` but
    which are not being called from views.
    """
    client = _MockRequestClient()
    return client.request()
class BackendRetrievalTests(TestCase):
    """
    Test that utilities for retrieving the active backend work
    properly.
    """
    def test_get_backend(self):
        """
        Verify that ``get_backend()`` returns the correct value when
        passed a valid backend.
        """
        backend = get_backend('registration.backends.default.DefaultBackend')
        self.failUnless(isinstance(backend, DefaultBackend))

    def test_backend_error_invalid(self):
        """
        Test that a nonexistent/unimportable backend raises the
        correct exception.
        """
        self.assertRaises(
            ImproperlyConfigured,
            get_backend,
            'registration.backends.doesnotexist.NonExistentBackend')

    def test_backend_attribute_error(self):
        """
        Test that a backend module which exists but does not have a
        class of the specified name raises the correct exception.
        """
        self.assertRaises(
            ImproperlyConfigured,
            get_backend,
            'registration.backends.default.NonexistentBackend')
class DefaultRegistrationBackendTests(TestCase):
"""
Test the default registration backend.
Running these tests successfull will require two templates to be
created for the sending of activation emails; details on these
templates and their contexts may be found in the documentation for
the default backend.
"""
backend = DefaultBackend()
def setUp(self):
"""
Create an instance of the default backend for use in testing,
and set ``ACCOUNT_ACTIVATION_DAYS`` if it's not set already.
"""
self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = 7
def tearDown(self):
"""
Yank out ``ACCOUNT_ACTIVATION_DAYS`` back out if it wasn't
originally set.
"""
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation
def test_registration(self):
"""
Test the registration process: registration creates a new
inactive account and a new profile with activation key,
populates the correct account data and sends an activation
email.
"""
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
# Details of the returned user must match what went in.
self.assertEqual(new_user.username, 'bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must not be active.
self.failIf(new_user.is_active)
# A registration profile was created, and an activation email
# was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_no_sites(self):
"""
Test that registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
Site._meta.installed = False
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
self.assertEqual(new_user.username, 'bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
self.failIf(new_user.is_active)
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
Site._meta.installed = True
def test_valid_activation(self):
"""
Test the activation process: activating within the permitted
window sets the account's ``is_active`` field to ``True`` and
resets the activation key.
"""
valid_user = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
valid_profile = RegistrationProfile.objects.get(user=valid_user)
activated = self.backend.activate(_mock_request(),
valid_profile.activation_key)
self.assertEqual(activated.username, valid_user.username)
self.failUnless(activated.is_active)
# Fetch the profile again to verify its activation key has
# been reset.
valid_profile = RegistrationProfile.objects.get(user=valid_user)
self.assertEqual(valid_profile.activation_key,
RegistrationProfile.ACTIVATED)
def test_invalid_activation(self):
"""
Test the activation process: trying to activate outside the
permitted window fails, and leaves the account inactive.
"""
expired_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
expired_user.date_joined = expired_user.date_joined - datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
expired_user.save()
expired_profile = RegistrationProfile.objects.get(user=expired_user)
self.failIf(self.backend.activate(_mock_request(),
expired_profile.activation_key))
self.failUnless(expired_profile.activation_key_expired())
def test_allow(self):
"""
Test that the setting ``REGISTRATION_OPEN`` appropriately
controls whether registration is permitted.
"""
old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
settings.REGISTRATION_OPEN = True
self.failUnless(self.backend.registration_allowed(_mock_request()))
settings.REGISTRATION_OPEN = False
self.failIf(self.backend.registration_allowed(_mock_request()))
settings.REGISTRATION_OPEN = old_allowed
def test_form_class(self):
"""
Test that the default form class returned is
``registration.forms.RegistrationForm``.
"""
self.failUnless(self.backend.get_form_class(_mock_request()) is forms.RegistrationForm)
def test_post_registration_redirect(self):
"""
Test that the default post-registration redirect is the named
pattern ``registration_complete``.
"""
self.assertEqual(self.backend.post_registration_redirect(_mock_request(), User()),
('registration_complete', (), {}))
def test_registration_signal(self):
"""
Test that registering a user sends the ``user_registered``
signal.
"""
def receiver(sender, **kwargs):
self.failUnless('user' in kwargs)
self.assertEqual(kwargs['user'].username, 'bob')
self.failUnless('request' in kwargs)
self.failUnless(isinstance(kwargs['request'], WSGIRequest))
received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_registered.connect(receiver, sender=self.backend.__class__)
self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
self.assertEqual(len(received_signals), 1)
self.assertEqual(received_signals, [signals.user_registered])
def test_activation_signal_success(self):
"""
Test that successfully activating a user sends the
``user_activated`` signal.
"""
def receiver(sender, **kwargs):
self.failUnless('user' in kwargs)
self.assertEqual(kwargs['user'].username, 'bob')
self.failUnless('request' in kwargs)
self.failUnless(isinstance(kwargs['request'], WSGIRequest))
received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_activated.connect(receiver, sender=self.backend.__class__)
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
profile = RegistrationProfile.objects.get(user=new_user)
self.backend.activate(_mock_request(), profile.activation_key)
self.assertEqual(len(received_signals), 1)
self.assertEqual(received_signals, [signals.user_activated])
def test_activation_signal_failure(self):
"""
Test that an unsuccessful activation attempt does not send the
``user_activated`` signal.
"""
receiver = lambda sender, **kwargs: received_signals.append(kwargs.get('signal'))
received_signals = []
signals.user_activated.connect(receiver, sender=self.backend.__class__)
new_user = self.backend.register(_mock_request(),
username='bob',
email='bob@example.com',
password1='secret')
new_user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS + 1)
new_user.save()
profile = RegistrationProfile.objects.get(user=new_user)
self.backend.activate(_mock_request(), profile.activation_key)
self.assertEqual(len(received_signals), 0)
def test_email_send_action(self):
"""
Test re-sending of activation emails via admin action.
"""
admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
alice = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
admin_class.resend_activation_email(_mock_request(),
RegistrationProfile.objects.all())
self.assertEqual(len(mail.outbox), 2) # One on registering, one more on the resend.
RegistrationProfile.objects.filter(user=alice).update(activation_key=RegistrationProfile.ACTIVATED)
admin_class.resend_activation_email(_mock_request(),
RegistrationProfile.objects.all())
self.assertEqual(len(mail.outbox), 2) # No additional email because the account has activated.
def test_activation_action(self):
"""
Test manual activation of users view admin action.
"""
admin_class = RegistrationAdmin(RegistrationProfile, admin.site)
alice = self.backend.register(_mock_request(),
username='alice',
email='alice@example.com',
password1='swordfish')
admin_class.activate_users(_mock_request(),
RegistrationProfile.objects.all())
self.failUnless(User.objects.get(username='alice').is_active)
class SimpleRegistrationBackendTests(TestCase):
    """
    Test the simple registration backend, which does signup and
    immediate activation.
    """
    backend = SimpleBackend()

    def test_registration(self):
        """
        Test the registration process: registration creates a new
        account with the supplied data and immediately activates it.
        """
        new_user = self.backend.register(_mock_request(),
                                         username='bob',
                                         email='bob@example.com',
                                         password1='secret')
        # Details of the returned user must match what went in.
        self.assertEqual(new_user.username, 'bob')
        self.assertTrue(new_user.check_password('secret'))
        self.assertEqual(new_user.email, 'bob@example.com')
        # Unlike the default backend, the simple backend activates the
        # account immediately (the original comment claimed the opposite
        # of what the assertion checks).
        self.assertTrue(new_user.is_active)

    def test_allow(self):
        """
        Test that the setting ``REGISTRATION_OPEN`` appropriately
        controls whether registration is permitted.
        """
        old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
        # Restore the original setting even if an assertion fails.
        try:
            settings.REGISTRATION_OPEN = True
            self.assertTrue(self.backend.registration_allowed(_mock_request()))
            settings.REGISTRATION_OPEN = False
            self.assertFalse(self.backend.registration_allowed(_mock_request()))
        finally:
            settings.REGISTRATION_OPEN = old_allowed

    def test_form_class(self):
        """
        Test that the default form class returned is
        ``registration.forms.RegistrationForm``.
        """
        self.assertTrue(
            self.backend.get_form_class(_mock_request()) is forms.RegistrationForm)

    def test_post_registration_redirect(self):
        """
        Test that the default post-registration redirect is the public
        URL of the new user account.
        """
        new_user = self.backend.register(_mock_request(),
                                         username='bob',
                                         email='bob@example.com',
                                         password1='secret')
        self.assertEqual(
            self.backend.post_registration_redirect(_mock_request(), new_user),
            (new_user.get_absolute_url(), (), {}))

    def test_registration_signal(self):
        """
        Test that registering a user sends the ``user_registered``
        signal.
        """
        received_signals = []

        def receiver(sender, **kwargs):
            self.assertTrue('user' in kwargs)
            self.assertEqual(kwargs['user'].username, 'bob')
            self.assertTrue('request' in kwargs)
            self.assertTrue(isinstance(kwargs['request'], WSGIRequest))
            received_signals.append(kwargs.get('signal'))

        signals.user_registered.connect(receiver, sender=self.backend.__class__)
        try:
            self.backend.register(_mock_request(),
                                  username='bob',
                                  email='bob@example.com',
                                  password1='secret')
        finally:
            # Disconnect so the receiver cannot leak into other tests.
            signals.user_registered.disconnect(receiver,
                                               sender=self.backend.__class__)
        self.assertEqual(len(received_signals), 1)
        self.assertEqual(received_signals, [signals.user_registered])

    def test_activation(self):
        """
        Test that activating against this backend is an error.
        """
        self.assertRaises(NotImplementedError, self.backend.activate,
                          request=_mock_request())

    def test_post_activation_redirect(self):
        """
        Test that asking for a post-activation redirect from this
        backend is an error.
        """
        self.assertRaises(NotImplementedError, self.backend.post_activation_redirect,
                          request=_mock_request(), user=User())
| {
"content_hash": "82c50328b8fbaa8acd959ce3fc8a6edc",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 119,
"avg_line_length": 37.46218487394958,
"alnum_prop": 0.58838043965904,
"repo_name": "stefankoegl/django-couchdb-utils",
"id": "f87eebeb33ae497a02783e05a253a17e6584bbd3",
"size": "17832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_couchdb_utils/registration/tests/backends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1322"
},
{
"name": "Python",
"bytes": "142363"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
} |
import math
import numpy as np
import os.path
import pandas as pd
from astropy import constants as const
from pathlib import Path
import artistools as at
import artistools.spectra
import artistools.transitions
# Test input model data and plot output directories, located relative to the
# artistools package directory (at.PYDIR).
modelpath = Path(at.PYDIR, '..', 'tests', 'data')
outputpath = Path(at.PYDIR, '..', 'tests', 'output')
# Disable artistools' on-disk caching for the test run.
at.enable_diskcache = False
def test_spectraplot():
    """Plot a spectrum of the test model over a fixed time window."""
    plot_args = dict(argsraw=[], modelpath=modelpath, outputfile=outputpath,
                     timemin=290, timemax=320)
    at.spectra.main(**plot_args)
def test_spectra_frompackets():
    """Plot a spectrum reconstructed directly from the packet files."""
    pdfpath = os.path.join(outputpath, 'spectrum_from_packets.pdf')
    at.spectra.main(argsraw=[], modelpath=modelpath, outputfile=pdfpath,
                    frompackets=True, timemin=290, timemax=320)
def test_spectra_outputtext():
    """Write the spectra out as text instead of plotting."""
    at.spectra.main(argsraw=[], output_spectra=True, modelpath=modelpath)
def test_spectraemissionplot():
    """Plot emission/absorption contributions over a fixed time window."""
    at.spectra.main(argsraw=[], modelpath=modelpath, outputfile=outputpath,
                    emissionabsorption=True, timemin=290, timemax=320)
def test_spectraemissionplot_nostack():
    """Plot emission/absorption contributions without stacking them."""
    at.spectra.main(argsraw=[], modelpath=modelpath, outputfile=outputpath,
                    emissionabsorption=True, nostack=True,
                    timemin=290, timemax=320)
def test_spectra_get_spectrum():
    """Check get_spectrum() and the packet-based spectrum agree on test data."""

    def assert_flux_stats(spectrum):
        # Statistics of the flux column must match the reference values.
        assert math.isclose(max(spectrum['f_lambda']), 2.548532804918824e-13, abs_tol=1e-5)
        assert min(spectrum['f_lambda']) < 1e-9
        assert math.isclose(np.mean(spectrum['f_lambda']), 1.0314682640070206e-14, abs_tol=1e-5)

    dfspectrum = at.spectra.get_spectrum(modelpath, 55, 65, fnufilterfunc=None)
    wavelengths = dfspectrum['lambda_angstroms'].values
    assert len(wavelengths) == 1000
    assert len(dfspectrum['f_lambda']) == 1000
    # Endpoints of the wavelength grid must match the reference values.
    assert abs(wavelengths[-1] - 29920.601421214415) < 1e-5
    assert abs(wavelengths[0] - 600.75759482509852) < 1e-5
    assert_flux_stats(dfspectrum)

    timelowdays = at.get_timestep_times_float(modelpath)[55]
    timehighdays = at.get_timestep_times_float(modelpath)[65]
    # The packet-derived spectrum over the same time and wavelength range
    # must satisfy the same reference statistics.
    dfspectrumpkts = at.spectra.get_spectrum_from_packets(
        modelpath, timelowdays=timelowdays, timehighdays=timehighdays,
        lambda_min=wavelengths[0], lambda_max=wavelengths[-1])
    assert_flux_stats(dfspectrumpkts)
def test_spectra_get_flux_contributions():
    """Check the summed emission contributions integrate to the spec.out flux."""
    timestepmin = 40
    timestepmax = 80
    dfspectrum = at.spectra.get_spectrum(
        modelpath, timestepmin=timestepmin, timestepmax=timestepmax, fnufilterfunc=None)
    integrated_flux_specout = np.trapz(dfspectrum['f_lambda'], x=dfspectrum['lambda_angstroms'])

    # sep=r'\s+' replaces the deprecated delim_whitespace=True option of
    # pandas.read_csv (same whitespace-separated parsing behaviour).
    specdata = pd.read_csv(modelpath / 'spec.out', sep=r'\s+')
    arraynu = specdata.loc[:, '0'].values
    arraylambda_angstroms = const.c.to('angstrom/s').value / arraynu

    contribution_list, array_flambda_emission_total = at.spectra.get_flux_contributions(
        modelpath, timestepmin=timestepmin, timestepmax=timestepmax)
    # Wavelength runs opposite to frequency, so the integral sign flips.
    integrated_flux_emission = -np.trapz(array_flambda_emission_total, x=arraylambda_angstroms)

    # total spectrum should be equal to the sum of all emission processes
    print(f'Integrated flux from spec.out: {integrated_flux_specout}')
    print(f'Integrated flux from emission sum: {integrated_flux_emission}')
    assert math.isclose(integrated_flux_specout, integrated_flux_emission, rel_tol=4e-3)

    # check each bin is not out by a large fraction
    diff = [abs(x - y) for x, y in zip(array_flambda_emission_total, dfspectrum['f_lambda'].values)]
    print(f'Max f_lambda difference {max(diff) / integrated_flux_specout}')
    assert max(diff) / integrated_flux_specout < 2e-3
| {
"content_hash": "e96139ffa7727ad501ac1cdb449d9aea",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 118,
"avg_line_length": 41.17777777777778,
"alnum_prop": 0.7223421478683216,
"repo_name": "lukeshingles/artistools",
"id": "00a2b5f89e2c616806355574f64e42f3efadb976",
"size": "3730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artistools/spectra/test_spectra.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "437553"
},
{
"name": "Shell",
"bytes": "497"
}
],
"symlink_target": ""
} |
import logging
import uuid
from urllib.parse import urljoin, urlencode, parse_qs
import requests
logger = logging.getLogger('moneybird')
class Authentication(object):
    """
    Abstract base class for MoneyBird API authentication strategies.

    Subclasses must implement both :meth:`is_ready` and :meth:`get_session`.
    """

    def is_ready(self) -> bool:
        """
        Check whether authentication can be performed. A negative result means
        that it is certain that a request will not authenticate.

        :return: Whether the authentication is ready to be used.
        """
        raise NotImplementedError()

    def get_session(self) -> requests.Session:
        """
        Create a new session with the authentication settings applied.

        :return: The new session.
        """
        raise NotImplementedError()
class TokenAuthentication(Authentication):
    """
    Token authentication for the MoneyBird API.

    :param auth_token: The authentication token to use.
    """

    def __init__(self, auth_token: str = ''):
        self.auth_token = auth_token

    def set_token(self, auth_token: str):
        """
        Set the authentication token.

        :param auth_token: The authentication token to use.
        """
        self.auth_token = auth_token

    def is_ready(self) -> bool:
        # Any non-empty token counts as usable.
        return bool(self.auth_token)

    def get_session(self) -> requests.Session:
        # Attach the token to every request as a bearer Authorization header.
        session = requests.Session()
        session.headers['Authorization'] = 'Bearer %s' % self.auth_token
        return session
class OAuthAuthentication(Authentication):
    """
    OAuth authentication for the MoneyBird API.
    This is a wrapper around TokenAuthentication since token authentication is used after the OAuth process has been
    performed. This authentication method cannot be used directly, some work is required since the user has to perform
    a number of actions before a token can be obtained.
    :param redirect_url: The URL to redirect to after successful authorization.
    :param client_id: The OAuth client id obtained from MoneyBird.
    :param client_secret: The OAuth client secret obtained from MoneyBird.
    :param auth_token: The optional token from an earlier authorization.
    """
    base_url = 'https://moneybird.com/oauth/'
    auth_url = 'authorize/'
    token_url = 'token/'

    def __init__(self, redirect_url: str, client_id: str, client_secret: str, auth_token: str = ''):
        self.redirect_url = redirect_url
        self.client_id = client_id
        self.client_secret = client_secret
        # Token authentication takes over once a token has been obtained.
        self.real_auth = TokenAuthentication(auth_token)

    def authorize_url(self, scope: list, state: str = None) -> tuple:
        """
        Returns the URL to which the user can be redirected to authorize your application to access his/her account. It
        will also return the state which can be used for CSRF protection. A state is generated if not passed to this
        method.
        Example:
            >>> auth = OAuthAuthentication('https://example.com/oauth/moneybird/', 'your_id', 'your_secret')
            >>> auth.authorize_url()
            ('https://moneybird.com/oauth/authorize?client_id=your_id&redirect_uri=https%3A%2F%2Fexample.com%2Flogin%2F
            moneybird&state=random_string', 'random_string')
        :param scope: The requested scope.
        :param state: Optional state, when omitted a random value is generated.
        :return: 2-tuple containing the URL to redirect the user to and the randomly generated state.
        """
        url = urljoin(self.base_url, self.auth_url)
        params = {
            'response_type': 'code',
            'client_id': self.client_id,
            'redirect_uri': self.redirect_url,
            'scope': ' '.join(scope),
            'state': state if state is not None else self._generate_state(),
        }
        return "%s?%s" % (url, urlencode(params)), params['state']

    def obtain_token(self, redirect_url: str, state: str) -> str:
        """
        Exchange the code that was obtained using `authorize_url` for an authorization token. The code is extracted
        from the URL that redirected the user back to your site.
        Example:
            >>> auth = OAuthAuthentication('https://example.com/oauth/moneybird/', 'your_id', 'your_secret')
            >>> auth.obtain_token('https://example.com/oauth/moneybird/?code=any&state=random_string', 'random_string')
            'token_for_auth'
            >>> auth.is_ready()
            True
        :param redirect_url: The full URL the user was redirected to.
        :param state: The state used in the authorize url.
        :return: The authorization token.
        """
        url_data = parse_qs(redirect_url.split('?', 1)[1])
        if 'error' in url_data:
            logger.warning("Error received in OAuth authentication response: %s" % url_data.get('error'))
            raise OAuthAuthentication.OAuthError(url_data['error'], url_data.get('error_description', None))
        if 'code' not in url_data:
            logger.error("The provided URL is not a valid OAuth authentication response: no code")
            raise ValueError("The provided URL is not a valid OAuth authentication response: no code")
        # Robustness fix: use .get() so that a response without any 'state'
        # parameter is reported as a state mismatch instead of raising an
        # unrelated KeyError.
        if state and [state] != url_data.get('state'):
            logger.warning("OAuth CSRF attack detected: the state in the provided URL does not equal the given state")
            raise ValueError("CSRF attack detected: the state in the provided URL does not equal the given state")
        try:
            response = requests.post(
                url=urljoin(self.base_url, self.token_url),
                data={
                    'grant_type': 'authorization_code',
                    'code': url_data['code'][0],
                    'redirect_uri': self.redirect_url,
                    'client_id': self.client_id,
                    'client_secret': self.client_secret,
                },
            ).json()
        except ValueError:
            logger.error("The OAuth server returned an invalid response when obtaining a token: JSON error")
            raise ValueError("The OAuth server returned an invalid response when obtaining a token: JSON error")
        if 'error' in response:
            logger.warning("Error while obtaining OAuth authorization token: %s" % response['error'])
            # Bug fix: the human-readable description lives in the
            # 'error_description' field (RFC 6749), not in 'error' again.
            raise OAuthAuthentication.OAuthError(response['error'], response.get('error_description'))
        if 'access_token' not in response:
            logger.error("The OAuth server returned an invalid response when obtaining a token: no access token")
            raise ValueError("The remote server returned an invalid response when obtaining a token: no access token")
        self.real_auth.set_token(response['access_token'])
        # Security fix: do not write the bearer token itself to the log.
        logger.debug("Obtained authentication token for state %s" % state)
        return response['access_token']

    def is_ready(self) -> bool:
        # Ready as soon as the wrapped token authentication holds a token.
        return self.real_auth.is_ready()

    def get_session(self) -> requests.Session:
        return self.real_auth.get_session()

    @staticmethod
    def _generate_state() -> str:
        """
        Generates a new random string to be used as OAuth state.
        :return: A randomly generated OAuth state.
        """
        state = str(uuid.uuid4()).replace('-', '')
        logger.debug("Generated OAuth state: %s" % state)
        return state

    class OAuthError(Exception):
        """
        Exception for OAuth protocol errors.
        """
        def __init__(self, error_code: str, description: str = None):
            if not error_code:
                error_code = 'unknown'
            if not description:
                description = "Unknown reason"
            self.error_code = error_code
            msg = "OAuth error (%s): %s" % (error_code, description)
            super(OAuthAuthentication.OAuthError, self).__init__(msg)
| {
"content_hash": "b15bfbf816358de49804daa773ff622d",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 119,
"avg_line_length": 39.57286432160804,
"alnum_prop": 0.6246349206349207,
"repo_name": "jjkester/moneybird-python",
"id": "b0d862d407b639806408d5dc7d9378da9cea4a3c",
"size": "7875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moneybird/authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23565"
}
],
"symlink_target": ""
} |
''' Configuration settings for the Bridge backend webservices. '''
# Parent directory that the frontend is allowed to load files from.
# Any directory under this will be visible to the frontend.
ARCHIVE_PATH_LEADER = '/Users/skhudiky/workspace/Nano-code/nanograv-pipeline/data/archive/'
# Parent directory for job data (presumably per-job working files -- confirm
# against the services that read this setting).
JOBS_PATH_LEADER = '/Users/skhudiky/workspace/Nano-code/nanograv-pipeline/data/jobs/'
# Path to the python script for starting residual plotting workflow
RES_SCRIPT = '/Users/skhudiky/workspace/Nano-code/nanograv-pipeline/pge/bin/runPulsarAnalysis.py'
# Path to the python script for starting the Bridge analysis workflow.
OS_SCRIPT = '/Users/skhudiky/workspace/Nano-code/nanograv-pipeline/pge/bin/runBridgeAnalysis.py'
# Path to the python script for starting the Fstat analysis workflow.
FSTAT_SCRIPT = '/Users/skhudiky/workspace/Nano-code/nanograv-pipeline/pge/bin/runFstatAnalysis.py'
"content_hash": "aabf9896ccef1ae46e9f76fc8dce9c45",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 98,
"avg_line_length": 66.9090909090909,
"alnum_prop": 0.8016304347826086,
"repo_name": "shakeh/bridge-pipeline",
"id": "98a3e8b1b993566d6accfab724c9880004583d8d",
"size": "737",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "webapp/api/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4569"
},
{
"name": "HTML",
"bytes": "19125"
},
{
"name": "JavaScript",
"bytes": "30366"
},
{
"name": "Python",
"bytes": "21192"
},
{
"name": "Shell",
"bytes": "48416"
},
{
"name": "TeX",
"bytes": "139762"
},
{
"name": "XSLT",
"bytes": "24923"
}
],
"symlink_target": ""
} |
from unittest import main
from copy import deepcopy
import numpy as np
import pandas as pd
import calour as ca
from calour._testing import Tests
class MTests(Tests):
    """Tests for Experiment manipulation: joining metadata fields, joining
    experiments, and aggregating samples by metadata."""
    def setUp(self):
        # Load the shared test experiment without normalization.
        super().setUp()
        self.test1 = ca.read(self.test1_biom, self.test1_samp, self.test1_feat, normalize=None)
    def test_join_metadata_fields(self):
        """Joining two sample metadata fields creates a combined column."""
        # test the default params
        newexp = self.test1.join_metadata_fields('id', 'group', axis='s', inplace=False)
        self.assertIn('id_group', newexp.sample_metadata.columns)
        self.assertEqual(newexp.sample_metadata.loc['S12', 'id_group'], '12.0_2')
        # test we didn't change anything besides the new sample metadata column
        self.assert_experiment_equal(newexp, self.test1, ignore_md_fields=['id_group'])
    def test_join_metadata_fields_complex(self):
        """Joining feature metadata fields with custom name, separator, padding."""
        # test join feature fields with new field name, separator and inplace
        exp = deepcopy(self.test1)
        newexp = exp.join_metadata_fields('taxonomy', 'taxonomy', 'test', axis=1, sep=';', inplace=True)
        self.assertIs(newexp, exp)
        self.assertIn('test', exp.feature_metadata.columns)
        self.assertNotIn('test', exp.sample_metadata.columns)
        self.assertEqual(exp.feature_metadata['test'].iloc[11], 'bad_bacteria;bad_bacteria')
        # test we didn't change anything besides the new feature metadata column
        self.assert_experiment_equal(exp, self.test1, ignore_md_fields=['test'])
        # test join feature fields with new field name, separator, inplace and align
        exp = deepcopy(self.test1)
        newexp = exp.join_metadata_fields('taxonomy', 'ph', 'test', axis=1, sep=';', inplace=True, pad='-')
        self.assertIs(newexp, exp)
        self.assertIn('test', exp.feature_metadata.columns)
        self.assertNotIn('test', exp.sample_metadata.columns)
        self.assertEqual(exp.feature_metadata.loc['AT', 'test'], 'k__Bacteria; p__Tenericutes; c__Mollicutes; o__Mycoplasmatales; f__Mycoplasmataceae; g__Mycoplasma; s__-------------------------;4.1')
        # test we didn't change anything besides the new feature metadata column
        self.assert_experiment_equal(exp, self.test1, ignore_md_fields=['test'])
    def test_join_experiments(self):
        """Joining an experiment with a copy of itself doubles the samples."""
        # do the famous join experiment to itself trick
        texp = deepcopy(self.test1)
        newexp = self.test1.join_experiments(texp, field='experiments', prefixes=('c1', ''))
        self.assertEqual(len(newexp.feature_metadata), len(self.test1.feature_metadata))
        self.assertEqual(len(newexp.sample_metadata), len(self.test1.sample_metadata)*2)
        fexp = newexp.filter_samples('experiments', ['other'])
        self.assert_experiment_equal(fexp, texp, ignore_md_fields=['experiments'])
    def test_join_experiments_featurewise(self):
        """Joining two experiments along the feature axis keeps shared samples."""
        otu1 = ca.Experiment(np.array([[0, 9], [7, 4]]), sparse=False,
                             sample_metadata=pd.DataFrame({'category': ['B', 'A'],
                                                           'ph': [7.7, 6.6]},
                                                          index=['s2', 's1']),
                             feature_metadata=pd.DataFrame({'motile': ['y', 'n']}, index=['16S1', '16S2']))
        otu2 = ca.Experiment(np.array([[6], [8], [10]]), sparse=False,
                             sample_metadata=pd.DataFrame({'category': ['A', 'B', 'C'],
                                                           'ph': [6.6, 7.7, 8.8]},
                                                          index=['s1', 's2', 's3']),
                             feature_metadata=pd.DataFrame({'motile': [None]}, index=['ITS1']))
        combined_obs = otu1.join_experiments_featurewise(otu2, 'origin', ('16S', 'ITS'))
        combined_exp = ca.Experiment(np.array([[7, 4, 6], [0, 9, 8]]), sparse=False,
                                     sample_metadata=pd.DataFrame({'category': ['A', 'B'],
                                                                   'ph': [6.6, 7.7]},
                                                                  index=['s1', 's2']),
                                     feature_metadata=pd.DataFrame({'motile': ['y', 'n', None],
                                                                    'origin': ['16S', '16S', 'ITS']},
                                                                   index=['16S1', '16S2', 'ITS1']))
        # reorder the samples
        combined_obs = combined_obs.filter_ids(combined_exp.sample_metadata.index, axis=0)
        self.assert_experiment_equal(combined_obs, combined_exp)
    def test_agg_by_metadata(self):
        """Aggregating by a sample metadata field with the default mean method."""
        # test default conditions - on samples, not inplace, mean method
        newexp = self.test1.aggregate_by_metadata('group')
        self.assertEqual(newexp.shape[0], 3)
        self.assertEqual(list(newexp.data[:, 3]), [0, 10, 5])
        self.assertIsNot(newexp, self.test1)
        self.assertEqual(newexp.shape[1], self.test1.shape[1])
        # test the counts/original samples per merge value
        self.assertCountEqual(newexp.sample_metadata['_calour_merge_ids']['S1'],
                              ';'.join(['S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'S10', 'S11']))
        self.assertCountEqual(newexp.sample_metadata['_calour_merge_ids']['S12'],
                              ';'.join(['S12', 'S13', 'S14', 'S15', 'S16', 'S17', 'S18', 'S19', 'S20']))
        self.assertEqual(newexp.sample_metadata['_calour_merge_number']['S1'], 11)
        self.assertEqual(newexp.sample_metadata['_calour_merge_number']['S12'], 9)
    def test_agg_by_metadata_sum(self):
        """Aggregating by a sample metadata field with the sum method, in place."""
        # test on samples, inplace, sum method
        newexp = self.test1.aggregate_by_metadata('group', 'sum', inplace=True)
        newexp.sparse = False
        self.assertEqual(newexp.shape[0], 3)
        self.assertEqual(list(newexp.data[:, 3]), [0, 90, 5])
        self.assertIs(newexp, self.test1)
        self.assertEqual(newexp.shape[1], 12)
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    main()
| {
"content_hash": "9defb3c4930986eb9fc83af5a91ab983",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 200,
"avg_line_length": 58.262135922330096,
"alnum_prop": 0.5597400433261123,
"repo_name": "RNAer/Calour",
"id": "1118392259dc9862b52cad183801e604042afd95",
"size": "6352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calour/tests/test_manipulation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "5338"
},
{
"name": "Jupyter Notebook",
"bytes": "270154"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "Python",
"bytes": "247846"
}
],
"symlink_target": ""
} |
"""Evaluation utilities."""
import json
import re
import string
import unicodedata
import tensorflow.compat.v1 as tf
def normalize_answer(s):
  """Normalize an answer string for comparison.

  Applies Unicode NFD normalization, lowercasing, punctuation removal,
  article removal (a/an/the), and whitespace collapsing.
  """
  text = unicodedata.normalize("NFD", s)
  text = text.lower()
  punctuation = set(string.punctuation)
  text = "".join(ch for ch in text if ch not in punctuation)
  text = re.sub(r"\b(a|an|the)\b", " ", text)
  return " ".join(text.split())
def exact_match_score(prediction, ground_truth):
  """Return True iff prediction and ground truth match after normalization."""
  return normalize_answer(prediction) == normalize_answer(ground_truth)
def regex_match_score(prediction, ground_truth):
  """Return True iff the ground-truth regex matches the start of prediction."""
  flags = re.IGNORECASE | re.UNICODE | re.MULTILINE
  try:
    pattern = re.compile(ground_truth, flags=flags)
  except re.error:
    # An invalid regex in the ground truth never matches.
    return False
  return pattern.match(prediction) is not None
def metric_max_over_ground_truths(metric_fn, prediction,
                                  ground_truths):
  """Return the maximum of metric_fn(prediction, gt) over all ground truths."""
  return max(metric_fn(prediction, ground_truth)
             for ground_truth in ground_truths)
def is_correct(answers, prediction,
               is_regex):
  """Return whether prediction matches any answer (regex or exact match)."""
  metric_fn = regex_match_score if is_regex else exact_match_score
  return metric_max_over_ground_truths(
      metric_fn=metric_fn, prediction=prediction, ground_truths=answers)
def evaluate_predictions_impl(references,
                              predictions,
                              is_regex):
  """Calculates and returns metrics.

  Args:
    references: mapping of question -> acceptable answers (passed through to
      is_correct as ground truths).
    predictions: mapping of question -> predicted answer.
    is_regex: whether the answers are regular expressions.

  Returns:
    dict with missing_predictions, num_correct, num_total and accuracy.
  """
  missing_predictions = 0
  correct = 0
  for question, answers in references.items():
    if question in predictions:
      correct += int(
          is_correct(answers=answers, prediction=predictions[question],
                     is_regex=is_regex))
    else:
      missing_predictions += 1
  # Guard against empty references so the accuracy computation cannot raise
  # ZeroDivisionError.
  accuracy = correct / float(len(references)) if references else 0.0
  return dict(  # pytype: disable=bad-return-type  # dict-kwargs
      missing_predictions=missing_predictions,
      num_correct=correct,
      num_total=len(references),
      accuracy=accuracy)
def evaluate_predictions(
    references_path,
    predictions_path,
    is_regex,
    answer_field = "answer"):
  """Calculates and returns metrics for a predictions file vs. a references file."""

  def load_jsonl(path, field):
    # Map each question to the requested field of its JSON-lines record.
    mapping = {}
    with tf.io.gfile.GFile(path) as f:
      for line in f:
        example = json.loads(line)
        mapping[example["question"]] = example[field]
    return mapping

  if is_regex != ("CuratedTrec" in references_path):
    print("Warning: regex evaluation should (only) be applied to CuratedTrec.")
  references = load_jsonl(references_path, answer_field)
  print("Found {} references in {}".format(len(references), references_path))
  predictions = load_jsonl(predictions_path, "prediction")
  print("Found {} predictions in {}".format(len(predictions), predictions_path))
  return evaluate_predictions_impl(
      references=references, predictions=predictions, is_regex=is_regex)
| {
"content_hash": "98a2e9eb7a6e57c03f006623805dee2d",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 28.7196261682243,
"alnum_prop": 0.669378457533355,
"repo_name": "google-research/language",
"id": "0ca6118abb2b4c55927953afdec0818d8fad1782",
"size": "3688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/orqa/utils/eval_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
"""Runs a ResNet model on the ImageNet dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app as absl_app
from absl import flags
import tensorflow as tf
from official.r1.resnet import imagenet_preprocessing
from official.r1.resnet import resnet_model
from official.r1.resnet import resnet_run_loop
from official.utils.flags import core as flags_core
from official.utils.logs import logger
# Image geometry produced by the preprocessing pipeline.
DEFAULT_IMAGE_SIZE = 224
NUM_CHANNELS = 3
# 1001 classes (presumably the 1000 ImageNet classes plus a background
# class -- confirm against the label mapping used in training data).
NUM_CLASSES = 1001
# Number of examples per split of the dataset.
NUM_IMAGES = {
    'train': 1281167,
    'validation': 50000,
}
# Number of TFRecord shards holding the training split.
_NUM_TRAIN_FILES = 1024
# Shuffle buffer size for the input pipeline.
_SHUFFLE_BUFFER = 10000
DATASET_NAME = 'ImageNet'
###############################################################################
# Data processing
###############################################################################
def get_filenames(is_training, data_dir):
  """Return the list of TFRecord shard paths for the requested split."""
  if is_training:
    pattern, num_shards = 'train-%05d-of-01024', _NUM_TRAIN_FILES
  else:
    pattern, num_shards = 'validation-%05d-of-00128', 128
  return [os.path.join(data_dir, pattern % i) for i in range(num_shards)]
def _parse_example_proto(example_serialized):
  """Parses an Example proto containing a training example of an image.
  The output of the build_image_data.py image preprocessing script is a dataset
  containing serialized Example protocol buffers. Each Example proto contains
  the following fields (values are included as examples):
    image/height: 462
    image/width: 581
    image/colorspace: 'RGB'
    image/channels: 3
    image/class/label: 615
    image/class/synset: 'n03623198'
    image/class/text: 'knee pad'
    image/object/bbox/xmin: 0.1
    image/object/bbox/xmax: 0.9
    image/object/bbox/ymin: 0.2
    image/object/bbox/ymax: 0.6
    image/object/bbox/label: 615
    image/format: 'JPEG'
    image/filename: 'ILSVRC2012_val_00041207.JPEG'
    image/encoded: <JPEG encoded string>
  Args:
    example_serialized: scalar Tensor tf.string containing a serialized
      Example protocol buffer.
  Returns:
    image_buffer: Tensor tf.string containing the contents of a JPEG file.
    label: Tensor tf.int32 containing the label.
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
  """
  # Dense features in Example proto.
  feature_map = {
      'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
      'image/class/label': tf.io.FixedLenFeature([], dtype=tf.int64,
                                                 default_value=-1),
      'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
                                                default_value=''),
  }
  # Bounding-box coordinates are variable-length float lists.
  sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
  # Sparse features in Example proto.
  feature_map.update(
      {k: sparse_float32 for k in ['image/object/bbox/xmin',
                                   'image/object/bbox/ymin',
                                   'image/object/bbox/xmax',
                                   'image/object/bbox/ymax']})
  features = tf.io.parse_single_example(serialized=example_serialized,
                                        features=feature_map)
  # Labels are stored as int64 in the proto; downstream code uses int32.
  label = tf.cast(features['image/class/label'], dtype=tf.int32)
  xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
  ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
  xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
  ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
  # Note that we impose an ordering of (y, x) just to make life difficult.
  bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
  # Force the variable number of bounding boxes into the shape
  # [1, num_boxes, coords].
  bbox = tf.expand_dims(bbox, 0)
  bbox = tf.transpose(a=bbox, perm=[0, 2, 1])
  return features['image/encoded'], label, bbox
def parse_record(raw_record, is_training, dtype):
  """Parse a serialized Example into a preprocessed image and its label.

  The serialized proto is decoded, and the image is passed through the
  preprocessing pipeline (cropping, flipping, and so on) before being cast to
  the requested dtype.

  Args:
    raw_record: scalar Tensor tf.string containing a serialized Example
      protocol buffer.
    is_training: A boolean denoting whether the input is for training.
    dtype: data type to use for images/features.

  Returns:
    Tuple with processed image tensor and label tensor.
  """
  encoded_image, label, bbox = _parse_example_proto(raw_record)
  processed_image = imagenet_preprocessing.preprocess_image(
      image_buffer=encoded_image,
      bbox=bbox,
      output_height=DEFAULT_IMAGE_SIZE,
      output_width=DEFAULT_IMAGE_SIZE,
      num_channels=NUM_CHANNELS,
      is_training=is_training)
  return tf.cast(processed_image, dtype), label
def input_fn(is_training,
             data_dir,
             batch_size,
             num_epochs=1,
             dtype=tf.float32,
             datasets_num_private_threads=None,
             parse_record_fn=parse_record,
             input_context=None,
             drop_remainder=False,
             tf_data_experimental_slack=False):
  """Input function which provides batches for train or eval.
  Args:
    is_training: A boolean denoting whether the input is for training.
    data_dir: The directory containing the input data.
    batch_size: The number of samples per batch.
    num_epochs: The number of epochs to repeat the dataset.
    dtype: Data type to use for images/features.
    datasets_num_private_threads: Number of private threads for tf.data.
    parse_record_fn: Function to use for parsing the records.
    input_context: A `tf.distribute.InputContext` object passed in by
      `tf.distribute.Strategy`.
    drop_remainder: A boolean indicates whether to drop the remainder of the
      batches. If True, the batch dimension will be static.
    tf_data_experimental_slack: Whether to enable tf.data's
      `experimental_slack` option.
  Returns:
    A dataset that can be used for iteration.
  """
  file_dataset = tf.data.Dataset.from_tensor_slices(
      get_filenames(is_training, data_dir))
  if input_context:
    # Under a distribution strategy, each pipeline only reads its own shard.
    tf.compat.v1.logging.info(
        'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
            input_context.input_pipeline_id,
            input_context.num_input_pipelines))
    file_dataset = file_dataset.shard(input_context.num_input_pipelines,
                                      input_context.input_pipeline_id)
  if is_training:
    # Shuffle the order in which the input files are read.
    file_dataset = file_dataset.shuffle(buffer_size=_NUM_TRAIN_FILES)
  # Convert file names to individual records.  cycle_length = 10 means up to
  # 10 files are read and deserialized in parallel; raise it on machines with
  # a large number of CPU cores.
  record_dataset = file_dataset.interleave(
      tf.data.TFRecordDataset,
      cycle_length=10,
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  return resnet_run_loop.process_record_dataset(
      dataset=record_dataset,
      is_training=is_training,
      batch_size=batch_size,
      shuffle_buffer=_SHUFFLE_BUFFER,
      parse_record_fn=parse_record_fn,
      num_epochs=num_epochs,
      dtype=dtype,
      datasets_num_private_threads=datasets_num_private_threads,
      drop_remainder=drop_remainder,
      tf_data_experimental_slack=tf_data_experimental_slack,
  )
def get_synth_input_fn(dtype):
  """Return an input_fn that produces synthetic ImageNet-shaped data of `dtype`."""
  synth_fn = resnet_run_loop.get_synth_input_fn(
      DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, NUM_CHANNELS, NUM_CLASSES,
      dtype=dtype)
  return synth_fn
###############################################################################
# Running the model
###############################################################################
class ImagenetModel(resnet_model.Model):
  """Model class with appropriate defaults for Imagenet data."""
  def __init__(self, resnet_size, data_format=None, num_classes=NUM_CLASSES,
               resnet_version=resnet_model.DEFAULT_VERSION,
               dtype=resnet_model.DEFAULT_DTYPE):
    """These are the parameters that work for Imagenet data.
    Args:
      resnet_size: The number of convolutional layers needed in the model.
      data_format: Either 'channels_first' or 'channels_last', specifying which
        data format to use when setting up the model.
      num_classes: The number of output classes needed from the model. This
        enables users to extend the same model to their own datasets.
      resnet_version: Integer representing which version of the ResNet network
        to use. See README for details. Valid values: [1, 2]
      dtype: The TensorFlow dtype to use for calculations.
    """
    # Bigger models (>= 50 layers) use the "bottleneck" residual block.
    use_bottleneck = resnet_size >= 50
    super(ImagenetModel, self).__init__(
        resnet_size=resnet_size,
        bottleneck=use_bottleneck,
        num_classes=num_classes,
        num_filters=64,
        kernel_size=7,
        conv_stride=2,
        first_pool_size=3,
        first_pool_stride=2,
        block_sizes=_get_block_sizes(resnet_size),
        block_strides=[1, 2, 2, 2],
        resnet_version=resnet_version,
        data_format=data_format,
        dtype=dtype
    )
def _get_block_sizes(resnet_size):
"""Retrieve the size of each block_layer in the ResNet model.
The number of block layers used for the Resnet model varies according
to the size of the model. This helper grabs the layer set we want, throwing
an error if a non-standard size has been selected.
Args:
resnet_size: The number of convolutional layers needed in the model.
Returns:
A list of block sizes to use in building the model.
Raises:
KeyError: if invalid resnet_size is received.
"""
choices = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
200: [3, 24, 36, 3]
}
try:
return choices[resnet_size]
except KeyError:
err = ('Could not find layers for selected Resnet size.\n'
'Size received: {}; sizes allowed: {}.'.format(
resnet_size, choices.keys()))
raise ValueError(err)
def imagenet_model_fn(features, labels, mode, params):
  """Our model_fn for ResNet to be used with our Estimator."""
  # When fine tuning with small batches / few images, warmup and the higher
  # base learning rate may not be valid, so use a lower LR without warmup.
  fine_tune = params['fine_tune']
  warmup = not fine_tune
  base_lr = .1 if fine_tune else .128
  learning_rate_fn = resnet_run_loop.learning_rate_with_decay(
      batch_size=params['batch_size'] * params.get('num_workers', 1),
      batch_denom=256, num_images=NUM_IMAGES['train'],
      boundary_epochs=[30, 60, 80, 90], decay_rates=[1, 0.1, 0.01, 0.001, 1e-4],
      warmup=warmup, base_lr=base_lr)
  return resnet_run_loop.resnet_model_fn(
      features=features,
      labels=labels,
      mode=mode,
      model_class=ImagenetModel,
      resnet_size=params['resnet_size'],
      weight_decay=flags.FLAGS.weight_decay,
      learning_rate_fn=learning_rate_fn,
      momentum=0.9,
      data_format=params['data_format'],
      resnet_version=params['resnet_version'],
      loss_scale=params['loss_scale'],
      loss_filter_fn=None,
      dtype=params['dtype'],
      fine_tune=fine_tune,
      label_smoothing=flags.FLAGS.label_smoothing
  )
def define_imagenet_flags():
  """Registers the command-line flags for ImageNet ResNet training."""
  # Only the standard ResNet depths are accepted for --resnet_size.
  resnet_run_loop.define_resnet_flags(
      resnet_size_choices=['18', '34', '50', '101', '152', '200'],
      dynamic_loss_scale=True,
      fp16_implementation=True)
  flags.adopt_module_key_flags(resnet_run_loop)
  # Standard ImageNet schedule trains for 90 epochs by default.
  flags_core.set_defaults(train_epochs=90)
def run_imagenet(flags_obj):
  """Run ResNet ImageNet training and eval loop.
  Args:
    flags_obj: An object containing parsed flag values.
  Returns:
    Dict of results of the run. Contains the keys `eval_results` and
    `train_hooks`. `eval_results` contains accuracy (top_1) and
    accuracy_top_5. `train_hooks` is a list the instances of hooks used during
    training.
  """
  # Use an explicit conditional instead of the fragile `cond and a or b`
  # idiom, which silently falls through to `b` whenever `a` is falsy.
  if flags_obj.use_synthetic_data:
    input_function = get_synth_input_fn(flags_core.get_tf_dtype(flags_obj))
  else:
    input_function = input_fn
  result = resnet_run_loop.resnet_main(
      flags_obj, imagenet_model_fn, input_function, DATASET_NAME,
      shape=[DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, NUM_CHANNELS])
  return result
def main(_):
  """absl entry point: runs the ImageNet loop inside a benchmark-logging context."""
  with logger.benchmark_context(flags.FLAGS):
    run_imagenet(flags.FLAGS)
if __name__ == '__main__':
  # Surface INFO-level TF logs, register the flags, then hand control to absl.
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  define_imagenet_flags()
  absl_app.run(main)
| {
"content_hash": "bbb2c99385f5e5433af187898c2a280f",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 80,
"avg_line_length": 34.188328912466844,
"alnum_prop": 0.6465202886182015,
"repo_name": "alexgorban/models",
"id": "bbff51dc8af1fd24270431361fb7a5d184ac934c",
"size": "13578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/r1/resnet/imagenet_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1619012"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "454746"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "16363107"
},
{
"name": "Shell",
"bytes": "144095"
},
{
"name": "Starlark",
"bytes": "148029"
}
],
"symlink_target": ""
} |
"""
celery.signals
~~~~~~~~~~~~~~
This module defines the signals (Observer pattern) sent by
both workers and clients.
Functions can be connected to these signals, and connected
functions are called whenever a signal is called.
See :ref:`signals` for more information.
"""
from __future__ import absolute_import
from .utils.dispatch import Signal
# All public signals.  `worker_process_shutdown` and `user_preload_options`
# are defined below but were missing here, so `from celery.signals import *`
# silently skipped them; the deprecated `task_sent` is left out on purpose.
__all__ = ['before_task_publish', 'after_task_publish',
           'task_prerun', 'task_postrun', 'task_success',
           'task_retry', 'task_failure', 'task_revoked', 'celeryd_init',
           'celeryd_after_setup', 'worker_init', 'worker_process_init',
           'worker_process_shutdown',
           'worker_ready', 'worker_shutdown', 'setup_logging',
           'after_setup_logger', 'after_setup_task_logger',
           'beat_init', 'beat_embedded_init', 'eventlet_pool_started',
           'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown',
           'eventlet_pool_apply', 'user_preload_options']
# -- Task publishing and execution lifecycle signals -----------------------
before_task_publish = Signal(providing_args=[
    'body', 'exchange', 'routing_key', 'headers', 'properties',
    'declare', 'retry_policy',
])
after_task_publish = Signal(providing_args=[
    'body', 'exchange', 'routing_key',
])
#: Deprecated, use after_task_publish instead.
task_sent = Signal(providing_args=[
    'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset',
])
task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs'])
task_postrun = Signal(providing_args=[
    'task_id', 'task', 'args', 'kwargs', 'retval',
])
task_success = Signal(providing_args=['result'])
task_retry = Signal(providing_args=[
    'request', 'reason', 'einfo',
])
task_failure = Signal(providing_args=[
    'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo',
])
task_revoked = Signal(providing_args=[
    'request', 'terminated', 'signum', 'expired',
])
# -- Worker lifecycle signals ----------------------------------------------
celeryd_init = Signal(providing_args=['instance', 'conf', 'options'])
celeryd_after_setup = Signal(providing_args=['instance', 'conf'])
worker_init = Signal(providing_args=[])
worker_process_init = Signal(providing_args=[])
worker_process_shutdown = Signal(providing_args=[])
worker_ready = Signal(providing_args=[])
worker_shutdown = Signal(providing_args=[])
# -- Logging setup signals -------------------------------------------------
setup_logging = Signal(providing_args=[
    'loglevel', 'logfile', 'format', 'colorize',
])
after_setup_logger = Signal(providing_args=[
    'logger', 'loglevel', 'logfile', 'format', 'colorize',
])
after_setup_task_logger = Signal(providing_args=[
    'logger', 'loglevel', 'logfile', 'format', 'colorize',
])
# -- Beat (periodic scheduler) signals -------------------------------------
beat_init = Signal(providing_args=[])
beat_embedded_init = Signal(providing_args=[])
# -- Eventlet pool signals -------------------------------------------------
eventlet_pool_started = Signal(providing_args=[])
eventlet_pool_preshutdown = Signal(providing_args=[])
eventlet_pool_postshutdown = Signal(providing_args=[])
eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs'])
# -- Program / CLI signals -------------------------------------------------
user_preload_options = Signal(providing_args=['app', 'options'])
| {
"content_hash": "e3ca61563ae6100932bda6833dbd355e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 74,
"avg_line_length": 38.67567567567568,
"alnum_prop": 0.6666666666666666,
"repo_name": "sivaprakashniet/push_pull",
"id": "6eae2febff12e5fab08048e7af9a6c02706c2d65",
"size": "2886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2p/lib/python2.7/site-packages/celery/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "33347"
},
{
"name": "CSS",
"bytes": "111284"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "933220"
},
{
"name": "JavaScript",
"bytes": "260224"
},
{
"name": "Nginx",
"bytes": "4758"
},
{
"name": "Python",
"bytes": "9725308"
},
{
"name": "Roff",
"bytes": "17679"
},
{
"name": "Shell",
"bytes": "6008"
}
],
"symlink_target": ""
} |
import os
import tempfile
import testtools
from pwd import getpwnam
from mock import MagicMock
from cloudify.mocks import MockCloudifyContext
from cloudify.exceptions import NonRecoverableError
from cloudify.state import current_ctx
from cloudify_common_sdk._compat import text_type
from .. import tasks as operation_task
class CloudifyFilesTestBase(testtools.TestCase):
    """Unit tests for the cloudify-files plugin's create/delete operations."""
    @property
    def _file_path(self):
        # NOTE(review): every access calls tempfile.mkstemp() and therefore
        # returns a brand-new file with a different path.  Code that reads
        # self._file_path twice is operating on two different files -- confirm
        # this is intentional.
        _, _file_path = tempfile.mkstemp()
        return _file_path
    @property
    def _downloaded_file_path(self):
        # Fresh temp file standing in for a resource downloaded from the
        # manager; returned by the download_resource mock in get_mock_ctx().
        _, _file_path = tempfile.mkstemp()
        return _file_path
    @property
    def _owner(self):
        # Builds a 'user:group' string from the environment.
        # For circle.ci.
        _user = os.environ.get('USER', u'circleci')
        _group = os.environ.get('GROUP', u'circleci')
        # Toggle these for local testing.
        return ':'.join([_user, _group])
    @property
    def _user_id(self):
        # Numeric uid for the user half of _owner, or None when _owner is not
        # a well-formed 'user:group' text string.
        _owner = self._owner
        if not isinstance(_owner, text_type):
            return None
        split_owner = _owner.split(':')
        if not len(split_owner) == 2:
            return None
        _pwnam = getpwnam(split_owner[0])
        return getattr(_pwnam, 'pw_uid')
    @property
    def _resource_config(self):
        # Canonical operation input.  'file_path' reads the _file_path
        # property, so each access to this property also creates a new file.
        _resource_config = {
            'resource_path': 'resources/file',
            'owner': self._owner,
            'mode': 644,
            'file_path': self._file_path,
            'use_sudo': False
        }
        return _resource_config
    def get_mock_ctx(self):
        """Build a MockCloudifyContext whose download_resource yields a temp file."""
        _ctx = MockCloudifyContext(
            node_id='mock',
            deployment_id='mock',
            operation={'retry_number': 1},
            properties={},
            runtime_properties={}
        )
        setattr(
            _ctx,
            'download_resource',
            MagicMock(return_value=self._downloaded_file_path))
        return _ctx
    def common_asserts(self, _operation_output):
        """Shared checks: operation returned True and the file is rw- for the owner."""
        self.assertIs(True, _operation_output)
        # NOTE(review): each self._file_path access below creates a *new* temp
        # file (see the property), so these assertions exercise mkstemp
        # defaults rather than the file written by the operation -- verify.
        self.assertIs(
            True, os.path.exists(self._file_path))
        self.assertIs(
            True,
            os.access(
                self._file_path,
                os.R_OK))
        self.assertIs(
            True,
            os.access(
                self._file_path,
                os.W_OK))
        self.assertIs(
            False,
            os.access(
                self._file_path,
                os.X_OK))
        file_stat = os.stat(self._file_path)
        self.assertEqual(self._user_id, getattr(file_stat, 'st_uid'))
    def test_operation_create_from_inputs_no_file(self):
        """create raises NonRecoverableError for a nonexistent file_path."""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        resource_config = self._resource_config
        resource_config['file_path'] = \
            '/aint/no/platform/in/the/world/with/this/dumb/path' \
            'yet'
        self.addCleanup(os.remove, self._file_path)
        raised_error = self.assertRaises(
            NonRecoverableError,
            operation_task.create,
            resource_config=resource_config)
        self.assertIn(
            'No such file or directory',
            str(raised_error))
    def test_operation_create_from_inputs(self):
        """Test the create function with inputs"""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        resource_config = self._resource_config
        self.addCleanup(os.remove, self._file_path)
        operation_output = \
            operation_task.create(resource_config=resource_config)
        self.common_asserts(operation_output)
    def test_operation_create_from_node_properties(self):
        """Test the create function with node properties"""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        _ctx.node.properties['resource_config'] = self._resource_config
        self.addCleanup(os.remove, self._file_path)
        operation_output = operation_task.create()
        self.common_asserts(operation_output)
    def test_operation_create_from_runtime_properties(self):
        """Test the create function with runtime properties"""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        _ctx.instance.runtime_properties['resource_config'] = \
            self._resource_config
        self.addCleanup(os.remove, self._file_path)
        operation_output = operation_task.create()
        self.common_asserts(operation_output)
    def test_operation_delete(self):
        """Test that delete removes the file created by create."""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        resource_config = self._resource_config
        # self.addCleanup(os.remove, self._file_path)
        operation_output = \
            operation_task.create(resource_config=resource_config)
        self.common_asserts(operation_output)
        operation_task.delete(resource_config=resource_config)
        self.assertIs(False, os.path.exists(resource_config.get('file_path')))
    def test_operation_create_from_inputs_sudo(self):
        """Test the create function with inputs and use_sudo"""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        # NOTE(review): this mutates the dict returned by one property access;
        # the next line builds a *fresh* dict whose use_sudo is False again,
        # so the sudo flag appears to be lost -- confirm intent.
        self._resource_config['use_sudo'] = True
        resource_config = self._resource_config
        self.addCleanup(os.remove, self._file_path)
        operation_output = \
            operation_task.create(resource_config=resource_config)
        self.common_asserts(operation_output)
    def test_operation_create_from_node_properties_sudo(self):
        """Test the create function with node properties and use_sudo"""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        self._resource_config['use_sudo'] = True
        _ctx.node.properties['resource_config'] = self._resource_config
        self.addCleanup(os.remove, self._file_path)
        operation_output = operation_task.create()
        self.common_asserts(operation_output)
    def test_operation_create_from_runtime_properties_sudo(self):
        """Test the create function with runtime properties and use_sudo"""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        self._resource_config['use_sudo'] = True
        _ctx.instance.runtime_properties['resource_config'] = \
            self._resource_config
        self.addCleanup(os.remove, self._file_path)
        operation_output = operation_task.create()
        self.common_asserts(operation_output)
    def test_operation_delete_sudo(self):
        """Test that delete removes the created file with use_sudo set."""
        _ctx = self.get_mock_ctx()
        current_ctx.set(_ctx)
        self._resource_config['use_sudo'] = True
        resource_config = self._resource_config
        # self.addCleanup(os.remove, self._file_path)
        operation_output = \
            operation_task.create(resource_config=resource_config)
        self.common_asserts(operation_output)
        operation_task.delete(resource_config=resource_config)
        self.assertIs(False, os.path.exists(resource_config.get('file_path')))
| {
"content_hash": "b73407d64b751ae326579c72b71f0c0a",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 78,
"avg_line_length": 34.338235294117645,
"alnum_prop": 0.6039971448965025,
"repo_name": "cloudify-incubator/cloudify-utilities-plugin",
"id": "13f90c4952b9ad2dfa2ede2b1a82d1dbc5652afe",
"size": "7625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudify_files/tests/test_cloudify_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "533410"
},
{
"name": "Shell",
"bytes": "50"
}
],
"symlink_target": ""
} |
"""
The PyBuilder utils module.
Provides generic utilities that can be used by plugins.
"""
import json
import sys
import subprocess
import tempfile
import time
import traceback
from subprocess import Popen, PIPE
from multiprocessing import Process
import fnmatch
import os
import re
try:
from multiprocessing import SimpleQueue
except ImportError:
from multiprocessing.queues import SimpleQueue
from pybuilder.errors import MissingPrerequisiteException, PyBuilderException
if sys.version_info[0] < 3:  # if major is less than 3
    from .excp_util_2 import raise_exception, is_string
    # Re-binding presumably marks the imported name as used / re-exported
    # for static analyzers -- confirm before removing.
    is_string = is_string
else:
    from .excp_util_3 import raise_exception, is_string
    is_string = is_string
def get_all_dependencies_for_task(task):
    """
    Return a list of every task required (transitively) by the given
    task function, excluding the given task itself.
    """
    from pybuilder.reactor import Reactor
    task_name = task.__name__
    manager = Reactor.current_instance().execution_manager
    transitive_tasks = manager.collect_all_transitive_tasks([task_name])
    return [t for t in transitive_tasks if t.name != task_name]
def render_report(report_dict):
    """Serialize *report_dict* as pretty-printed, key-sorted JSON."""
    serialized = json.dumps(report_dict, sort_keys=True, indent=2)
    return serialized
def format_timestamp(timestamp):
    """Render a datetime as 'YYYY-MM-DD HH:MM:SS'."""
    return "{:%Y-%m-%d %H:%M:%S}".format(timestamp)
def timedelta_in_millis(timedelta):
    """Convert a datetime.timedelta to whole milliseconds (microseconds rounded)."""
    whole_seconds = timedelta.days * 24 * 60 * 60 + timedelta.seconds
    return whole_seconds * 1000 + round(timedelta.microseconds / 1000)
def as_list(*whatever):
    """
    Flatten the given arguments into a single list.
    Lists and tuples are unpacked, None values are dropped, and every
    other element is appended as-is.
    Examples given
    >>> as_list('spam')
    ['spam']
    >>> as_list('spam', 'eggs')
    ['spam', 'eggs']
    >>> as_list(('spam', 'eggs'))
    ['spam', 'eggs']
    >>> as_list(['spam', 'eggs'])
    ['spam', 'eggs']
    >>> as_list(['spam', 'eggs'], ('spam', 'eggs'), 'foo', 'bar')
    ['spam', 'eggs', 'spam', 'eggs', 'foo', 'bar']
    """
    collected = []
    for item in whatever:
        if item is None:
            continue
        if isinstance(item, (list, tuple)):
            collected.extend(item)
        else:
            collected.append(item)
    return collected
def remove_leading_slash_or_dot_from_path(path):
    """Strip a single leading '/' or '.' from *path*, if present."""
    return path[1:] if path.startswith(('/', '.')) else path
def remove_python_source_suffix(file_name):
    """Drop a trailing '.py' from *file_name*, if present."""
    suffix = ".py"
    if file_name.endswith(suffix):
        return file_name[:-len(suffix)]
    return file_name
def discover_modules(source_path, suffix=".py"):
    """Discover module names under *source_path* for files ending in *suffix*."""
    return discover_modules_matching(source_path, "*" + suffix)
def discover_modules_matching(source_path, module_glob):
    """Discover dotted module names under *source_path* matching *module_glob*."""
    if not module_glob.endswith(".py"):
        module_glob += ".py"
    modules = []
    for module_file_path in discover_files_matching(source_path, module_glob):
        # Turn the path relative to source_path into a dotted module name.
        relative = module_file_path.replace(source_path, "").replace(os.sep, ".")
        module_file = remove_leading_slash_or_dot_from_path(relative)
        module_name = remove_python_source_suffix(module_file)
        if module_name.endswith(".__init__"):
            # Packages are addressed by their package name, not __init__.
            module_name = module_name.replace(".__init__", "")
        modules.append(module_name)
    return modules
def discover_files(start_dir, suffix):
    """Yield files under *start_dir* whose names end with *suffix*."""
    return discover_files_matching(start_dir, "*" + suffix)
def discover_files_matching(start_dir, file_glob):
    """Yield absolute paths of files under *start_dir* whose basename matches *file_glob*."""
    for dir_path, _, file_names in os.walk(start_dir):
        matching = (name for name in file_names
                    if fnmatch.fnmatch(name, file_glob))
        for name in matching:
            yield os.path.join(dir_path, name)
def execute_command(command_and_arguments, outfile_name, env=None, cwd=None, error_file_name=None, shell=False):
    """
    Run a command with stdout redirected to *outfile_name* and stderr to
    *error_file_name* (defaults to outfile_name + '.err'); return exit code.
    """
    err_name = error_file_name if error_file_name is not None else outfile_name + ".err"
    with open(outfile_name, "w") as out_fd, open(err_name, "w") as err_fd:
        child = subprocess.Popen(command_and_arguments,
                                 stdout=out_fd,
                                 stderr=err_fd,
                                 env=env,
                                 cwd=cwd,
                                 shell=shell)
        return child.wait()
def execute_command_and_capture_output(*command_and_arguments):
    """Run a command and return (return_code, stdout_text, stderr_text)."""
    child = Popen(command_and_arguments, stdout=PIPE, stderr=PIPE)
    raw_out, raw_err = child.communicate()
    # Decode with the console encodings, defaulting to UTF-8 when unset.
    decoded_out = raw_out.decode(sys.stdout.encoding or 'utf-8')
    decoded_err = raw_err.decode(sys.stderr.encoding or 'utf-8')
    return child.returncode, decoded_out, decoded_err
def assert_can_execute(command_and_arguments, prerequisite, caller):
    """
    Try to launch *command_and_arguments*; raise MissingPrerequisiteException
    (naming *prerequisite* and *caller*) when the executable cannot be run.
    """
    fd, probe_file = tempfile.mkstemp()
    probe = open(probe_file, "w")
    try:
        child = subprocess.Popen(command_and_arguments,
                                 stdout=probe, stderr=probe, shell=False)
        child.wait()
    except OSError:
        # The executable is missing or not launchable.
        raise MissingPrerequisiteException(prerequisite, caller)
    finally:
        probe.close()
        os.close(fd)
        os.unlink(probe_file)
def read_file(file_name):
    """Return the lines of *file_name* as a list (newline characters kept)."""
    with open(file_name, "r") as handle:
        lines = handle.readlines()
    return lines
def write_file(file_name, *lines):
    """Write *lines* verbatim to *file_name* (no newlines are appended)."""
    with open(file_name, "w") as handle:
        for line in lines:
            handle.write(line)
class Timer(object):
    """Simple wall-clock stopwatch reporting elapsed milliseconds."""
    @staticmethod
    def start():
        """Create and start a new timer."""
        return Timer()
    def __init__(self):
        self.start_time = time.time()
        self.end_time = None
    def stop(self):
        """Record the stop time."""
        self.end_time = time.time()
    def get_millis(self):
        """Return elapsed milliseconds; the timer must have been stopped."""
        if self.end_time is None:
            raise PyBuilderException("Timer is running.")
        elapsed_seconds = self.end_time - self.start_time
        return int(elapsed_seconds * 1000)
def apply_on_files(start_directory, closure, globs, *additional_closure_arguments, **keyword_closure_arguments):
    """
    Walk *start_directory* and invoke *closure* for every file whose path,
    relative to start_directory, matches at least one glob in *globs*.
    The closure receives (absolute_path, relative_path, *extra, **kwextra).
    """
    expressions = [GlobExpression(g) for g in globs]
    for root, _, file_names in os.walk(start_directory):
        for file_name in file_names:
            absolute_file_name = os.path.join(root, file_name)
            relative_file_name = absolute_file_name.replace(start_directory, "")[1:]
            for expression in expressions:
                if expression.matches(relative_file_name):
                    closure(absolute_file_name,
                            relative_file_name,
                            *additional_closure_arguments,
                            **keyword_closure_arguments)
class GlobExpression(object):
    """
    Ant-style glob matcher: '*' matches within one path segment,
    '**' matches across segments.  Compiled to an anchored regex.
    """
    def __init__(self, expression):
        self.expression = expression
        translated = expression.replace("**", ".+").replace("*", "[^/]*")
        self.regex = "^" + translated + "$"
        self.pattern = re.compile(self.regex)
    def matches(self, path):
        """Return True when *path* matches the whole glob expression."""
        return self.pattern.match(path) is not None
def mkdir(directory):
    """
    Tries to create the directory denoted by the given name. If it exists and is a directory, nothing will be created
    and no error is raised. If it exists as a file a PyBuilderException is raised. Otherwise the directory incl.
    all parents is created.
    """
    if os.path.exists(directory):
        if os.path.isfile(directory):
            # Fixed typo in the message: "Unable to created" -> "Unable to create".
            message = "Unable to create directory '%s': A file with that name already exists"
            raise PyBuilderException(message, directory)
        return
    os.makedirs(directory)
def is_windows():
    """Return True when the interpreter reports a win32 platform."""
    platform_name = sys.platform
    return "win32" in platform_name
def fake_windows_fork(group, target, name, args, kwargs):
    """Invoke *target* synchronously in-process, mimicking fork_process's (exit_code, result) shape."""
    result = target(*args, **kwargs)
    return 0, result
def fork_process(logger, group=None, target=None, name=None, args=(), kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        # No fork on Windows; run the target inline instead (see issue #184).
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target)
        return fake_windows_fork(group, target, name, args, kwargs)
    try:
        # Install tblib's traceback pickling support exactly once: only
        # import/install if the module has not been loaded yet.
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support
        tblib.pickling_support.install()
    q = SimpleQueue()
    def instrumented_target(*args, **kwargs):
        # Runs in the child: send (result, exception, traceback) through the
        # queue so the parent can re-raise the child's exception faithfully.
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)
        try:
            q.put(send_value)
        except:
            # The payload itself failed to pickle/send; ship a plain Exception
            # carrying both the send error and the original error's text.
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
            q.put(e_out)
    p = Process(group=group, target=instrumented_target, name=name, args=args, kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        # Normal path: re-raise the child's exception if any, else return.
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        # Fallback path: the child could not send its payload at all.
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2], "".join(traceback.format_tb(result.args[3])))
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
if sys.version_info[0] == 2 and sys.version_info[1] == 6: # if Python is 2.6
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
    '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
    while comparison to a regular mapping is order-insensitive.

    '''
    if not isinstance(other, OrderedDict):
        # Plain mappings compare order-insensitively, like dict.
        return dict.__eq__(self, other)
    # items() returns lists here (Python 2), so this comparison is ordered.
    return len(self) == len(other) and self.items() == other.items()
def __ne__(self, other):
    'od.__ne__(y) <==> od!=y.  Defined as the inverse of __eq__.'
    return not (self == other)
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
    "od.viewkeys() -> a set-like object providing a view on od's keys"
    keys_view = KeysView(self)
    return keys_view
def viewvalues(self):
    "od.viewvalues() -> an object providing a view on od's values"
    values_view = ValuesView(self)
    return values_view
def viewitems(self):
    "od.viewitems() -> a set-like object providing a view on od's items"
    items_view = ItemsView(self)
    return items_view
odict = OrderedDict
else:
from collections import OrderedDict
odict = OrderedDict
| {
"content_hash": "ece90193b24622eda651ecf666853d8c",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 117,
"avg_line_length": 34.40350877192982,
"alnum_prop": 0.5585415604283529,
"repo_name": "alex-dow/pybuilder",
"id": "b61acd8a840aeed10d462e823a648dfac188fe1b",
"size": "20279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/pybuilder/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "3144"
},
{
"name": "Python",
"bytes": "581057"
}
],
"symlink_target": ""
} |
"""The main entry point for the new development server."""
import argparse
import errno
import getpass
import itertools
import logging
import os
import sys
import tempfile
import time
from google.appengine.api import appinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools import boolean_action
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import dispatcher
from google.appengine.tools.devappserver2 import gcd_application
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import shutdown
from google.appengine.tools.devappserver2 import update_checker
from google.appengine.tools.devappserver2 import wsgi_request_info
from google.appengine.tools.devappserver2.admin import admin_server
# Initialize logging early -- otherwise some library packages may
# pre-empt our log formatting.  NOTE: the level is provisional; it may
# be changed in main() based on the --debug flag.
logging.basicConfig(
    level=logging.INFO,
    format='%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s] %(message)s')

# Valid choices for --log_level and their corresponding constants in
# runtime_config_pb2.Config.stderr_log_level.
_LOG_LEVEL_TO_RUNTIME_CONSTANT = {
    'debug': 0,
    'info': 1,
    'warning': 2,
    'error': 3,
    'critical': 4,
}

# Valid choices for --dev_appserver_log_level and their corresponding Python
# logging levels.  Controls the verbosity of the dev server itself, as
# opposed to the sandboxed application (see _LOG_LEVEL_TO_RUNTIME_CONSTANT).
_LOG_LEVEL_TO_PYTHON_CONSTANT = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

# The default encoding used by the production interpreter.
_PROD_DEFAULT_ENCODING = 'ascii'
def _generate_storage_paths(app_id):
"""Yield an infinite sequence of possible storage paths."""
if sys.platform == 'win32':
# The temp directory is per-user on Windows so there is no reason to add
# the username to the generated directory name.
user_format = ''
else:
try:
user_name = getpass.getuser()
except Exception: # The possible set of exceptions is not documented.
user_format = ''
else:
user_format = '.%s' % user_name
tempdir = tempfile.gettempdir()
yield os.path.join(tempdir, 'appengine.%s%s' % (app_id, user_format))
for i in itertools.count(1):
yield os.path.join(tempdir, 'appengine.%s%s.%d' % (app_id, user_format, i))
def _get_storage_path(path, app_id):
"""Returns a path to the directory where stub data can be stored."""
_, _, app_id = app_id.replace(':', '_').rpartition('~')
if path is None:
for path in _generate_storage_paths(app_id):
try:
os.mkdir(path, 0700)
except OSError, e:
if e.errno == errno.EEXIST:
# Check that the directory is only accessable by the current user to
# protect against an attacker creating the directory in advance in
# order to access any created files. Windows has per-user temporary
# directories and st_mode does not include per-user permission
# information so assume that it is safe.
if sys.platform == 'win32' or (
(os.stat(path).st_mode & 0777) == 0700 and os.path.isdir(path)):
return path
else:
continue
raise
else:
return path
elif not os.path.exists(path):
os.mkdir(path)
return path
elif not os.path.isdir(path):
raise IOError('the given storage path %r is a file, a directory was '
'expected' % path)
else:
return path
class PortParser(object):
  """Argparse type callable that validates and converts TCP port numbers."""

  def __init__(self, allow_port_zero=True):
    # Port 0 means "pick any free port"; some flags must name a real port.
    if allow_port_zero:
      self._min_port = 0
    else:
      self._min_port = 1

  def __call__(self, value):
    try:
      port = int(value)
    except ValueError:
      raise argparse.ArgumentTypeError('Invalid port: %r' % value)
    if not self._min_port <= port < 65536:
      raise argparse.ArgumentTypeError('Invalid port: %d' % port)
    return port
def parse_per_module_option(
    value, value_type, value_predicate,
    single_bad_type_error, single_bad_predicate_error,
    multiple_bad_type_error, multiple_bad_predicate_error,
    multiple_duplicate_module_error):
  """Parses a command line option that may be specified per-module.

  Two flag formats are accepted: a universal value (no colon allowed), which
  applies to every module, or one or more comma-separated module:value pairs.
  An empty module name in a pair is shorthand for the default module, matching
  how an app.yaml without a module name means "module: default".

  Args:
    value: A str with the raw flag value to parse.
    value_type: A callable converting the string form to the real value;
        must raise ValueError on bad input.
    value_predicate: A predicate validating the converted value; use
        "lambda _: True" if every converted value is acceptable.
    single_bad_type_error: Message for a universal value that fails
        value_type; takes one format parameter (the raw value).
    single_bad_predicate_error: Message for a universal value that fails
        value_predicate; takes no format parameters.
    multiple_bad_type_error: Message for a malformed or unconvertible
        module:value pair; takes one format parameter (the raw pair).
    multiple_bad_predicate_error: Message for a per-module value that fails
        value_predicate; takes one format parameter (the module name).
    multiple_duplicate_module_error: Message for a repeated module name;
        takes one format parameter (the module name).

  Returns:
    The converted universal value, or a dict mapping module name to the
    converted per-module value.

  Raises:
    argparse.ArgumentTypeError: the value is invalid.
  """
  if ':' not in value:
    # Universal form: a single value applied to every module.
    try:
      converted = value_type(value)
    except ValueError:
      raise argparse.ArgumentTypeError(single_bad_type_error % value)
    if not value_predicate(converted):
      raise argparse.ArgumentTypeError(single_bad_predicate_error)
    return converted

  module_to_value = {}
  for pair in value.split(','):
    try:
      module_name, raw_value = pair.split(':')
      converted = value_type(raw_value)
    except ValueError:
      raise argparse.ArgumentTypeError(multiple_bad_type_error % pair)
    module_name = module_name.strip()
    if not module_name:
      # Empty name is shorthand for the default module.
      module_name = appinfo.DEFAULT_MODULE
    if module_name in module_to_value:
      raise argparse.ArgumentTypeError(
          multiple_duplicate_module_error % module_name)
    if not value_predicate(converted):
      raise argparse.ArgumentTypeError(
          multiple_bad_predicate_error % module_name)
    module_to_value[module_name] = converted
  return module_to_value
def parse_max_module_instances(value):
  """Returns the parsed value for the --max_module_instances flag.

  Args:
    value: A str with the flag value.  Either a bare count applied to every
        module (e.g. "5") or comma-separated module:count pairs (e.g.
        "default:3,backend:20"); an empty module name (":3") is shorthand
        for the default module.

  Returns:
    An int for the universal form, or a dict of str->int for the
    per-module form.

  Raises:
    argparse.ArgumentTypeError: the value is invalid.
  """
  def _is_positive(count):
    # Zero or negative instance counts are meaningless.
    return count > 0

  return parse_per_module_option(
      value, int, _is_positive,
      'Invalid max instance count: %r',
      'Max instance count must be greater than zero',
      'Expected "module:max_instance_count": %r',
      'Max instance count for module %s must be greater than zero',
      'Duplicate max instance count for module %s')
def parse_threadsafe_override(value):
  """Returns the parsed value for the --threadsafe_override flag.

  Args:
    value: A str with the flag value.  Either a bare boolean applied to every
        module (e.g. "False") or comma-separated module:bool pairs (e.g.
        "default:False,backend:True"); an empty module name (":True") is
        shorthand for the default module.  Modules not named keep the value
        from their YAML file.

  Returns:
    A bool for the universal form, or a dict of str->bool for the
    per-module form.

  Raises:
    argparse.ArgumentTypeError: the value is invalid.
  """
  def _always_valid(_):
    # Any parseable boolean is acceptable, so no predicate errors can occur.
    return True

  return parse_per_module_option(
      value, boolean_action.BooleanParse, _always_valid,
      'Invalid threadsafe override: %r',
      None,
      'Expected "module:threadsafe_override": %r',
      None,
      'Duplicate threadsafe override value for module %s')
def parse_path(value):
  """Returns the given path with ~ and environment variables expanded."""
  with_vars = os.path.expandvars(value)
  return os.path.expanduser(with_vars)
def create_command_line_parser():
  """Returns an argparse.ArgumentParser to parse command line arguments.

  The parser groups flags by the subsystem they configure (Common, PHP,
  Dart, App Identity, Python, Java, Blobstore, Cloud SQL, Datastore, Logs,
  Mail, Prospective Search, Search, Task Queue, Miscellaneous).
  """
  # TODO: Add more robust argument validation. Consider what flags
  # are actually needed.
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  arg_name = 'yaml_path'
  arg_help = 'Path to a yaml file, or a directory containing yaml files'
  if application_configuration.java_supported():
    arg_name = 'yaml_or_war_path'
    arg_help += ', or a directory containing WEB-INF/web.xml'
  parser.add_argument(
      'config_paths', metavar=arg_name, nargs='+', help=arg_help)

  common_group = parser.add_argument_group('Common')
  common_group.add_argument(
      '-A', '--application', action='store', dest='app_id',
      help='Set the application, overriding the application value from the '
      'app.yaml file.')
  common_group.add_argument(
      '--host', default='localhost',
      help='host name to which application modules should bind')
  common_group.add_argument(
      '--port', type=PortParser(), default=8080,
      help='lowest port to which application modules should bind')
  common_group.add_argument(
      '--admin_host', default='localhost',
      help='host name to which the admin server should bind')
  common_group.add_argument(
      '--admin_port', type=PortParser(), default=8000,
      help='port to which the admin server should bind')
  common_group.add_argument(
      '--auth_domain', default='gmail.com',
      help='name of the authorization domain to use')
  common_group.add_argument(
      '--storage_path', metavar='PATH',
      type=parse_path,
      help='path to the data (datastore, blobstore, etc.) associated with the '
      'application.')
  common_group.add_argument(
      '--log_level', default='info',
      choices=_LOG_LEVEL_TO_RUNTIME_CONSTANT.keys(),
      help='the log level below which logging messages generated by '
      'application code will not be displayed on the console')
  # Typo fixes below: 'in which case' (was 'in what case') and
  # 'comma-separated' (was 'comma-seperated').
  common_group.add_argument(
      '--max_module_instances',
      type=parse_max_module_instances,
      help='the maximum number of runtime instances that can be started for a '
      'particular module - the value can be an integer, in which case all '
      'modules are limited to that number of instances or a comma-separated '
      'list of module:max_instances e.g. "default:5,backend:3"')
  common_group.add_argument(
      '--use_mtime_file_watcher',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='use mtime polling for detecting source code changes - useful if '
      'modifying code from a remote machine using a distributed file system')
  common_group.add_argument(
      '--threadsafe_override',
      type=parse_threadsafe_override,
      help='override the application\'s threadsafe configuration - the value '
      'can be a boolean, in which case all modules threadsafe setting will '
      'be overridden or a comma-separated list of module:threadsafe_override '
      'e.g. "default:False,backend:True"')
  common_group.add_argument('--enable_mvm_logs',
                            action=boolean_action.BooleanAction,
                            const=True,
                            default=False,
                            help=argparse.SUPPRESS)

  # PHP
  php_group = parser.add_argument_group('PHP')
  php_group.add_argument('--php_executable_path', metavar='PATH',
                         type=parse_path,
                         help='path to the PHP executable')
  php_group.add_argument('--php_remote_debugging',
                         action=boolean_action.BooleanAction,
                         const=True,
                         default=False,
                         help='enable XDebug remote debugging')
  php_group.add_argument('--php_gae_extension_path', metavar='PATH',
                         type=parse_path,
                         help='path to the GAE PHP extension')
  php_group.add_argument('--php_xdebug_extension_path', metavar='PATH',
                         type=parse_path,
                         help='path to the xdebug extension')

  # Dart
  dart_group = parser.add_argument_group('Dart')
  dart_group.add_argument('--dart_sdk', help=argparse.SUPPRESS)
  dart_group.add_argument('--dart_dev_mode',
                          choices=['dev', 'deploy'],
                          help=argparse.SUPPRESS)
  dart_group.add_argument('--dart_pub_serve_host', help=argparse.SUPPRESS)
  dart_group.add_argument('--dart_pub_serve_port',
                          type=PortParser(), help=argparse.SUPPRESS)

  # App Identity
  appidentity_group = parser.add_argument_group('Application Identity')
  appidentity_group.add_argument(
      '--appidentity_email_address',
      help='email address associated with a service account that has a '
      'downloadable key. May be None for no local application identity.')
  appidentity_group.add_argument(
      '--appidentity_private_key_path',
      help='path to private key file associated with service account '
      '(.pem format). Must be set if appidentity_email_address is set.')

  # Python
  python_group = parser.add_argument_group('Python')
  # Typo fix: close the parenthesis in the help text (was 'debuggers.').
  python_group.add_argument(
      '--python_startup_script',
      help='the script to run at the startup of new Python runtime instances '
      '(useful for tools such as debuggers).')
  python_group.add_argument(
      '--python_startup_args',
      help='the arguments made available to the script specified in '
      '--python_startup_script.')

  # Java
  java_group = parser.add_argument_group('Java')
  java_group.add_argument(
      '--jvm_flag', action='append',
      help='additional arguments to pass to the java command when launching '
      'an instance of the app. May be specified more than once. Example: '
      '--jvm_flag=-Xmx1024m --jvm_flag=-Xms256m')

  # Blobstore
  blobstore_group = parser.add_argument_group('Blobstore API')
  blobstore_group.add_argument(
      '--blobstore_path',
      type=parse_path,
      help='path to directory used to store blob contents '
      '(defaults to a subdirectory of --storage_path if not set)',
      default=None)

  # Cloud SQL
  cloud_sql_group = parser.add_argument_group('Cloud SQL')
  cloud_sql_group.add_argument(
      '--mysql_host',
      default='localhost',
      help='host name of a running MySQL server used for simulated Google '
      'Cloud SQL storage')
  cloud_sql_group.add_argument(
      '--mysql_port', type=PortParser(allow_port_zero=False),
      default=3306,
      help='port number of a running MySQL server used for simulated Google '
      'Cloud SQL storage')
  cloud_sql_group.add_argument(
      '--mysql_user',
      default='',
      help='username to use when connecting to the MySQL server specified in '
      '--mysql_host and --mysql_port or --mysql_socket')
  cloud_sql_group.add_argument(
      '--mysql_password',
      default='',
      help='password to use when connecting to the MySQL server specified in '
      '--mysql_host and --mysql_port or --mysql_socket')
  cloud_sql_group.add_argument(
      '--mysql_socket',
      help='path to a Unix socket file to use when connecting to a running '
      'MySQL server used for simulated Google Cloud SQL storage')

  # Datastore
  datastore_group = parser.add_argument_group('Datastore API')
  datastore_group.add_argument(
      '--datastore_path',
      type=parse_path,
      default=None,
      help='path to a file used to store datastore contents '
      '(defaults to a file in --storage_path if not set)',)
  datastore_group.add_argument('--clear_datastore',
                               action=boolean_action.BooleanAction,
                               const=True,
                               default=False,
                               help='clear the datastore on startup')
  datastore_group.add_argument(
      '--datastore_consistency_policy',
      default='time',
      choices=['consistent', 'random', 'time'],
      help='the policy to apply when deciding whether a datastore write should '
      'appear in global queries')
  datastore_group.add_argument(
      '--require_indexes',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='generate an error on datastore queries that '
      'requires a composite index not found in index.yaml')
  datastore_group.add_argument(
      '--auto_id_policy',
      default=datastore_stub_util.SCATTERED,
      choices=[datastore_stub_util.SEQUENTIAL,
               datastore_stub_util.SCATTERED],
      help='the type of sequence from which the datastore stub '
      'assigns automatic IDs. NOTE: Sequential IDs are '
      'deprecated. This flag will be removed in a future '
      'release. Please do not rely on sequential IDs in your '
      'tests.')
  datastore_group.add_argument(
      '--enable_cloud_datastore',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help=argparse.SUPPRESS #'enable the Google Cloud Datastore API.'
      )

  # Logs
  logs_group = parser.add_argument_group('Logs API')
  logs_group.add_argument(
      '--logs_path', default=None,
      help='path to a file used to store request logs (defaults to a file in '
      '--storage_path if not set)',)

  # Mail
  mail_group = parser.add_argument_group('Mail API')
  mail_group.add_argument(
      '--show_mail_body',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='logs the contents of e-mails sent using the Mail API')
  mail_group.add_argument(
      '--enable_sendmail',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='use the "sendmail" tool to transmit e-mail sent '
      'using the Mail API (ignored if --smtp_host is set)')
  mail_group.add_argument(
      '--smtp_host', default='',
      help='host name of an SMTP server to use to transmit '
      'e-mail sent using the Mail API')
  mail_group.add_argument(
      '--smtp_port', default=25,
      type=PortParser(allow_port_zero=False),
      help='port number of an SMTP server to use to transmit '
      'e-mail sent using the Mail API (ignored if --smtp_host '
      'is not set)')
  mail_group.add_argument(
      '--smtp_user', default='',
      help='username to use when connecting to the SMTP server '
      'specified in --smtp_host and --smtp_port')
  mail_group.add_argument(
      '--smtp_password', default='',
      help='password to use when connecting to the SMTP server '
      'specified in --smtp_host and --smtp_port')
  mail_group.add_argument(
      '--smtp_allow_tls',
      action=boolean_action.BooleanAction,
      const=True,
      default=True,
      help='Allow TLS to be used when the SMTP server announces TLS support '
      '(ignored if --smtp_host is not set)')

  # Matcher
  prospective_search_group = parser.add_argument_group('Prospective Search API')
  prospective_search_group.add_argument(
      '--prospective_search_path', default=None,
      type=parse_path,
      help='path to a file used to store the prospective '
      'search subscription index (defaults to a file in '
      '--storage_path if not set)')
  prospective_search_group.add_argument(
      '--clear_prospective_search',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='clear the prospective search subscription index')

  # Search
  search_group = parser.add_argument_group('Search API')
  search_group.add_argument(
      '--search_indexes_path', default=None,
      type=parse_path,
      help='path to a file used to store search indexes '
      '(defaults to a file in --storage_path if not set)',)
  search_group.add_argument(
      '--clear_search_indexes',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='clear the search indexes')

  # Taskqueue
  taskqueue_group = parser.add_argument_group('Task Queue API')
  taskqueue_group.add_argument(
      '--enable_task_running',
      action=boolean_action.BooleanAction,
      const=True,
      default=True,
      help='run "push" tasks created using the taskqueue API automatically')

  # Misc
  misc_group = parser.add_argument_group('Miscellaneous')
  # Typo fix: 'handlers readable' (was 'handles readable').
  misc_group.add_argument(
      '--allow_skipped_files',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='make files specified in the app.yaml "skip_files" or "static" '
      'handlers readable by the application.')
  # No help to avoid lengthening help message for rarely used feature:
  # host name to which the server for API calls should bind.
  misc_group.add_argument(
      '--api_host', default='localhost',
      help=argparse.SUPPRESS)
  misc_group.add_argument(
      '--api_port', type=PortParser(), default=0,
      help='port to which the server for API calls should bind')
  misc_group.add_argument(
      '--automatic_restart',
      action=boolean_action.BooleanAction,
      const=True,
      default=True,
      help=('restart instances automatically when files relevant to their '
            'module are changed'))
  misc_group.add_argument(
      '--dev_appserver_log_level', default='info',
      choices=_LOG_LEVEL_TO_PYTHON_CONSTANT.keys(),
      help='the log level below which logging messages generated by '
      'the development server will not be displayed on the console (this '
      'flag is more useful for diagnosing problems in dev_appserver.py rather '
      'than in application code)')
  misc_group.add_argument(
      '--skip_sdk_update_check',
      action=boolean_action.BooleanAction,
      const=True,
      default=False,
      help='skip checking for SDK updates (if false, use .appcfg_nag to '
      'decide)')
  misc_group.add_argument(
      '--default_gcs_bucket_name', default=None,
      help='default Google Cloud Storage bucket name')

  return parser
# Module-level parser instance, built once at import time and reused by main().
PARSER = create_command_line_parser()
def _clear_datastore_storage(datastore_path):
"""Delete the datastore storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(datastore_path):
try:
os.remove(datastore_path)
except OSError, e:
logging.warning('Failed to remove datastore file %r: %s',
datastore_path,
e)
def _clear_prospective_search_storage(prospective_search_path):
"""Delete the perspective search storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(prospective_search_path):
try:
os.remove(prospective_search_path)
except OSError, e:
logging.warning('Failed to remove prospective search file %r: %s',
prospective_search_path,
e)
def _clear_search_indexes_storage(search_index_path):
"""Delete the search indexes storage file at the given path."""
# lexists() returns True for broken symlinks, where exists() returns False.
if os.path.lexists(search_index_path):
try:
os.remove(search_index_path)
except OSError, e:
logging.warning('Failed to remove search indexes file %r: %s',
search_index_path,
e)
def _setup_environ(app_id):
"""Sets up the os.environ dictionary for the front-end server and API server.
This function should only be called once.
Args:
app_id: The id of the application.
"""
os.environ['APPLICATION_ID'] = app_id
class DevelopmentServer(object):
"""Encapsulates the logic for the development server.
Only a single instance of the class may be created per process. See
_setup_environ.
"""
def __init__(self):
# A list of servers that are currently running.
self._running_modules = []
self._module_to_port = {}
self._dispatcher = None
def module_to_address(self, module_name, instance=None):
"""Returns the address of a module."""
if module_name is None:
return self._dispatcher.dispatch_address
return self._dispatcher.get_hostname(
module_name,
self._dispatcher.get_default_version(module_name),
instance)
def start(self, options):
"""Start devappserver2 servers based on the provided command line arguments.
Args:
options: An argparse.Namespace containing the command line arguments.
"""
logging.getLogger().setLevel(
_LOG_LEVEL_TO_PYTHON_CONSTANT[options.dev_appserver_log_level])
configuration = application_configuration.ApplicationConfiguration(
options.config_paths, options.app_id)
if options.enable_cloud_datastore:
# This requires the oauth server stub to return that the logged in user
# is in fact an admin.
os.environ['OAUTH_IS_ADMIN'] = '1'
gcd_module = application_configuration.ModuleConfiguration(
gcd_application.generate_gcd_app(configuration.app_id.split('~')[1]))
configuration.modules.append(gcd_module)
if options.skip_sdk_update_check:
logging.info('Skipping SDK update check.')
else:
update_checker.check_for_updates(configuration)
# There is no good way to set the default encoding from application code
# (it needs to be done during interpreter initialization in site.py or
# sitecustomize.py) so just warn developers if they have a different
# encoding than production.
if sys.getdefaultencoding() != _PROD_DEFAULT_ENCODING:
logging.warning(
'The default encoding of your local Python interpreter is set to %r '
'while App Engine\'s production environment uses %r; as a result '
'your code may behave differently when deployed.',
sys.getdefaultencoding(), _PROD_DEFAULT_ENCODING)
if options.port == 0:
logging.warn('DEFAULT_VERSION_HOSTNAME will not be set correctly with '
'--port=0')
_setup_environ(configuration.app_id)
self._dispatcher = dispatcher.Dispatcher(
configuration,
options.host,
options.port,
options.auth_domain,
_LOG_LEVEL_TO_RUNTIME_CONSTANT[options.log_level],
self._create_php_config(options),
self._create_python_config(options),
self._create_java_config(options),
self._create_cloud_sql_config(options),
self._create_vm_config(options),
self._create_module_to_setting(options.max_module_instances,
configuration, '--max_module_instances'),
options.use_mtime_file_watcher,
options.automatic_restart,
options.allow_skipped_files,
self._create_module_to_setting(options.threadsafe_override,
configuration, '--threadsafe_override'))
request_data = wsgi_request_info.WSGIRequestInfo(self._dispatcher)
storage_path = _get_storage_path(options.storage_path, configuration.app_id)
apis = self._create_api_server(
request_data, storage_path, options, configuration)
apis.start()
self._running_modules.append(apis)
self._dispatcher.start(options.api_host, apis.port, request_data)
xsrf_path = os.path.join(storage_path, 'xsrf')
admin = admin_server.AdminServer(options.admin_host, options.admin_port,
self._dispatcher, configuration, xsrf_path)
admin.start()
self._running_modules.append(admin)
def stop(self):
"""Stops all running devappserver2 modules."""
while self._running_modules:
self._running_modules.pop().quit()
if self._dispatcher:
self._dispatcher.quit()
@staticmethod
def _create_api_server(request_data, storage_path, options, configuration):
datastore_path = options.datastore_path or os.path.join(storage_path,
'datastore.db')
logs_path = options.logs_path or os.path.join(storage_path, 'logs.db')
search_index_path = options.search_indexes_path or os.path.join(
storage_path, 'search_indexes')
prospective_search_path = options.prospective_search_path or os.path.join(
storage_path, 'prospective-search')
blobstore_path = options.blobstore_path or os.path.join(storage_path,
'blobs')
if options.clear_datastore:
_clear_datastore_storage(datastore_path)
if options.clear_prospective_search:
_clear_prospective_search_storage(prospective_search_path)
if options.clear_search_indexes:
_clear_search_indexes_storage(search_index_path)
if options.auto_id_policy==datastore_stub_util.SEQUENTIAL:
logging.warn("--auto_id_policy='sequential' is deprecated. This option "
"will be removed in a future release.")
application_address = '%s' % options.host
if options.port and options.port != 80:
application_address += ':' + str(options.port)
user_login_url = '/%s?%s=%%s' % (login.LOGIN_URL_RELATIVE,
login.CONTINUE_PARAM)
user_logout_url = '%s&%s=%s' % (user_login_url, login.ACTION_PARAM,
login.LOGOUT_ACTION)
if options.datastore_consistency_policy == 'time':
consistency = datastore_stub_util.TimeBasedHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'random':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy()
elif options.datastore_consistency_policy == 'consistent':
consistency = datastore_stub_util.PseudoRandomHRConsistencyPolicy(1.0)
else:
assert 0, ('unknown consistency policy: %r' %
options.datastore_consistency_policy)
api_server.maybe_convert_datastore_file_stub_data_to_sqlite(
configuration.app_id, datastore_path)
api_server.setup_stubs(
request_data=request_data,
app_id=configuration.app_id,
application_root=configuration.modules[0].application_root,
# The "trusted" flag is only relevant for Google administrative
# applications.
trusted=getattr(options, 'trusted', False),
appidentity_email_address=options.appidentity_email_address,
appidentity_private_key_path=os.path.abspath(
options.appidentity_private_key_path)
if options.appidentity_private_key_path else None,
blobstore_path=blobstore_path,
datastore_path=datastore_path,
datastore_consistency=consistency,
datastore_require_indexes=options.require_indexes,
datastore_auto_id_policy=options.auto_id_policy,
images_host_prefix='http://%s' % application_address,
logs_path=logs_path,
mail_smtp_host=options.smtp_host,
mail_smtp_port=options.smtp_port,
mail_smtp_user=options.smtp_user,
mail_smtp_password=options.smtp_password,
mail_enable_sendmail=options.enable_sendmail,
mail_show_mail_body=options.show_mail_body,
mail_allow_tls=options.smtp_allow_tls,
matcher_prospective_search_path=prospective_search_path,
search_index_path=search_index_path,
taskqueue_auto_run_tasks=options.enable_task_running,
taskqueue_default_http_server=application_address,
user_login_url=user_login_url,
user_logout_url=user_logout_url,
default_gcs_bucket_name=options.default_gcs_bucket_name)
return api_server.APIServer(options.api_host, options.api_port,
configuration.app_id)
@staticmethod
def _create_php_config(options):
php_config = runtime_config_pb2.PhpConfig()
if options.php_executable_path:
php_config.php_executable_path = os.path.abspath(
options.php_executable_path)
php_config.enable_debugger = options.php_remote_debugging
if options.php_gae_extension_path:
php_config.gae_extension_path = os.path.abspath(
options.php_gae_extension_path)
if options.php_xdebug_extension_path:
php_config.xdebug_extension_path = os.path.abspath(
options.php_xdebug_extension_path)
return php_config
@staticmethod
def _create_python_config(options):
python_config = runtime_config_pb2.PythonConfig()
if options.python_startup_script:
python_config.startup_script = os.path.abspath(
options.python_startup_script)
if options.python_startup_args:
python_config.startup_args = options.python_startup_args
return python_config
@staticmethod
def _create_java_config(options):
java_config = runtime_config_pb2.JavaConfig()
if options.jvm_flag:
java_config.jvm_args.extend(options.jvm_flag)
return java_config
@staticmethod
def _create_cloud_sql_config(options):
cloud_sql_config = runtime_config_pb2.CloudSQL()
cloud_sql_config.mysql_host = options.mysql_host
cloud_sql_config.mysql_port = options.mysql_port
cloud_sql_config.mysql_user = options.mysql_user
cloud_sql_config.mysql_password = options.mysql_password
if options.mysql_socket:
cloud_sql_config.mysql_socket = options.mysql_socket
return cloud_sql_config
@staticmethod
def _create_vm_config(options):
    """Build a VMConfig proto: Dart runtime settings plus the MVM log flag."""
    cfg = runtime_config_pb2.VMConfig()
    if options.dart_sdk:
        cfg.dart_config.dart_sdk = os.path.abspath(options.dart_sdk)
    if options.dart_dev_mode:
        cfg.dart_config.dart_dev_mode = options.dart_dev_mode
    if options.dart_pub_serve_host:
        cfg.dart_config.dart_pub_serve_host = options.dart_pub_serve_host
    if options.dart_pub_serve_port:
        cfg.dart_config.dart_pub_serve_port = options.dart_pub_serve_port
    cfg.enable_logs = options.enable_mvm_logs
    return cfg
@staticmethod
def _create_module_to_setting(setting, configuration, option):
    """Create a per-module dictionary configuration.

    Maps module names in `configuration` to a configuration setting.
    Used in conjunction with parse_per_module_option.

    Args:
      setting: None, a dict of module name -> value, or a single value.
      configuration: an ApplicationConfiguration object.
      option: the option name the setting came from (used in warnings).

    Returns:
      A dict of module name -> value.
    """
    if setting is None:
        return {}
    known = [module_configuration.module_name
             for module_configuration in configuration.modules]
    if not isinstance(setting, dict):
        # A scalar value applies to every module.
        return {name: setting for name in known}
    # A dict value: keep entries that name a known module, warn on the rest.
    result = {}
    for name, value in setting.items():
        if name in known:
            result[name] = value
        else:
            logging.warning('Unknown module %r for %r', name, option)
    return result
def main():
    """Dev-appserver entry point: parse command-line options, start the
    development server, and block until shutdown is requested."""
    shutdown.install_signal_handlers()
    # The timezone must be set in the devappserver2 process rather than just in
    # the runtime so printed log timestamps are consistent and the taskqueue stub
    # expects the timezone to be UTC. The runtime inherits the environment.
    os.environ['TZ'] = 'UTC'
    if hasattr(time, 'tzset'):
        # time.tzset() should be called on Unix, but doesn't exist on Windows.
        time.tzset()
    options = PARSER.parse_args()
    dev_server = DevelopmentServer()
    try:
        dev_server.start(options)
        shutdown.wait_until_shutdown()
    finally:
        # Always stop the server, even if startup or the wait raised.
        dev_server.stop()


if __name__ == '__main__':
    main()
| {
"content_hash": "73941ec598b1e214a220ce2f49cf4330",
"timestamp": "",
"source": "github",
"line_count": 969,
"max_line_length": 80,
"avg_line_length": 39.31475748194014,
"alnum_prop": 0.6671829063418732,
"repo_name": "ychen820/microblog",
"id": "410c3ad7beb2d894cabbf1dbcf829713179e0255",
"size": "38697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/devappserver2.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
# Packaging script for the MAP Client "file chooser step" plugin.
from setuptools import setup, find_packages
import sys, os
# The dependencies variable is used by MAP Client to
# determine if further downloads are required. Please
# list all dependencies here.
dependencies = [] # Insert plugin dependencies here
# The plugin is registered under the 'mapclientplugins' namespace package
# so MAP Client can discover it at run time.
setup(name=u'mapclientplugins.filechooserstep',
      version='0.0',
      description='',
      long_description="",
      classifiers=[],
      author=u'Hugh Sorby',
      author_email='',
      url='',
      license='GPL',
      packages=find_packages(exclude=['ez_setup',]),
      namespace_packages=['mapclientplugins'],
      include_package_data=True,
      zip_safe=False,
      install_requires=dependencies,
      )
| {
"content_hash": "0ba5404ebf8f385ca38f7d540a09ca8d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 54,
"avg_line_length": 29.434782608695652,
"alnum_prop": 0.6676514032496307,
"repo_name": "mapclient-plugins/filechooser",
"id": "b8418467c621054520e1d7402b353c63a91dde03",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21895"
}
],
"symlink_target": ""
} |
import os
import time
import zlib
from . import consts
__all__ = ("ZipStream", )
class Processor:
    """Per-file payload processor for the zip stream.

    Tracks the CRC-32 and the original/compressed byte counts while
    passing chunks through either unchanged ('store', cmethod None) or
    deflate-compressed (cmethod 'deflate').
    """

    def __init__(self, file_struct):
        self.crc = 0
        self.o_size = self.c_size = 0
        method = file_struct['cmethod']
        if method is None:
            # 'store' mode: chunks are forwarded untouched.
            self.process = self._store_chunk
            self.tail = self._store_tail
        elif method == 'deflate':
            # Raw deflate stream: negative wbits suppress the zlib
            # header/trailer, as required inside a zip file.
            self.compr = zlib.compressobj(5, zlib.DEFLATED, -15)
            self.process = self._deflate_chunk
            self.tail = self._deflate_tail

    def _store_chunk(self, chunk):
        """Count and pass through an uncompressed chunk."""
        self.o_size += len(chunk)
        self.c_size = self.o_size
        self.crc = zlib.crc32(chunk, self.crc)
        return chunk

    def _store_tail(self):
        """Storing produces no trailing bytes."""
        return b''

    def _deflate_chunk(self, chunk):
        """Compress one chunk, updating the CRC and both size counters."""
        self.o_size += len(chunk)
        self.crc = zlib.crc32(chunk, self.crc)
        compressed = self.compr.compress(chunk)
        self.c_size += len(compressed)
        return compressed

    def _deflate_tail(self):
        """Flush the compressor and return any buffered remainder."""
        remainder = self.compr.flush(zlib.Z_FINISH)
        self.c_size += len(remainder)
        return remainder

    def state(self):
        """Return (crc, original_size, compressed_size) after processing."""
        return self.crc, self.o_size, self.c_size
class ZipBase:
    """Shared machinery for building a ZIP archive as a stream.

    Tracks per-file metadata and emits the binary records — local file
    headers, data descriptors and the central directory — defined by the
    ZIP File Format Specification (via the `consts` module).
    """

    def __init__(self, files=(), chunksize=1024):
        """
        files - list of files, or generator returning files
            each file entry should be represented as dict with
            parameters:
            file - full path to file name
            name - (optional) name of file in zip archive
                if not used, filename stripped from 'file' will be used
            stream - (optional) can be used as replacement for 'file'
                entry, will be treated as generator returning
                chunks of data that will be streamed in archive.
                If used, then 'name' entry is required.
        chunksize - default size of data block streamed from files
        """
        # Default changed from a mutable [] to an immutable (): avoids the
        # shared-mutable-default pitfall; the value is only iterated, so
        # behaviour is unchanged.
        self._source_of_files = files
        self.__files = []
        self.__version = consts.ZIP32_VERSION
        self.zip64 = False
        self.chunksize = chunksize
        # this flag turns on signature for data descriptor record.
        # see section 4.3.9.3 of ZIP File Format Specification
        self.__use_ddmagic = True
        # central directory size and placement
        self.__cdir_size = self.__offset = 0

    def zip64_required(self):
        """
        Turn on zip64 mode for archive
        """
        raise NotImplementedError("Zip64 is not supported yet")

    def _create_file_struct(self, data):
        """
        extract info about streamed file and return all processed data
        required in zip archive
        """
        # date and time of file, packed into MS-DOS date/time words
        dt = time.localtime()
        dosdate = ((dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]) \
            & 0xffff
        dostime = (dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)) \
            & 0xffff
        # check zip32 limit
        # stats = os.stat(data['file'])
        # if stats.st_size > consts.ZIP32_LIMIT:
        #     self.zip64_required()
        # file properties used in zip
        file_struct = {'mod_time': dostime,
                       'mod_date': dosdate,
                       'crc': 0,  # will be calculated during data streaming
                       "offset": 0,  # file header offset in zip file
                       'flags': 0b00001000}  # flag about using data descriptor is always on
        if 'file' in data:
            file_struct['src'] = data['file']
            file_struct['stype'] = 'f'
        elif 'stream' in data:
            file_struct['src'] = data['stream']
            file_struct['stype'] = 's'
        else:
            raise Exception('No file or stream in sources')
        cmpr = data.get('compression', None)
        if cmpr not in (None, 'deflate'):
            raise Exception('Unknown compression method %r' % cmpr)
        file_struct['cmethod'] = cmpr
        file_struct['cmpr_id'] = {
            None: consts.COMPRESSION_STORE,
            'deflate': consts.COMPRESSION_DEFLATE}[cmpr]
        # file name in archive
        if 'name' not in data:
            data['name'] = os.path.basename(data['file'])
        try:
            # a plain ASCII name if possible...
            file_struct['fname'] = data['name'].encode("ascii")
        except UnicodeError:
            # ...otherwise UTF-8, signalled with the UTF-8 flag bit
            file_struct['fname'] = data['name'].encode("utf-8")
            file_struct['flags'] |= consts.UTF8_FLAG
        return file_struct

    # zip structures creation

    def _make_extra_field(self, signature, data):
        """
        Extra field for file
        """
        fields = {"signature": signature,
                  "size": len(data)}
        head = consts.EXTRA_TUPLE(**fields)
        head = consts.EXTRA_STRUCT.pack(*head)
        return head + data

    def _make_local_file_header(self, file_struct):
        """
        Create file header.

        CRC and size fields are written as zero here because they are not
        known yet; the real values follow in the data descriptor after the
        file body (flag bit 3 is set in file_struct['flags']).
        """
        fields = {"signature": consts.LF_MAGIC,
                  "version": self.__version,
                  "flags": file_struct['flags'],
                  "compression": file_struct['cmpr_id'],
                  "mod_time": file_struct['mod_time'],
                  "mod_date": file_struct['mod_date'],
                  "crc": 0,
                  "uncomp_size": 0,
                  "comp_size": 0,
                  "fname_len": len(file_struct['fname']),
                  "extra_len": 0}
        head = consts.LF_TUPLE(**fields)
        head = consts.LF_STRUCT.pack(*head)
        head += file_struct['fname']
        return head

    def _make_data_descriptor(self, file_struct, crc, org_size, compr_size):
        """
        Create file descriptor.
        This function also updates size and crc fields of file_struct
        """
        # hack for making CRC unsigned long
        file_struct['crc'] = crc & 0xffffffff
        file_struct['size'] = org_size
        file_struct['csize'] = compr_size
        fields = {"uncomp_size": file_struct['size'],
                  "comp_size": file_struct['csize'],
                  "crc": file_struct['crc']}
        descriptor = consts.DD_TUPLE(**fields)
        descriptor = consts.DD_STRUCT.pack(*descriptor)
        if self.__use_ddmagic:
            descriptor = consts.DD_MAGIC + descriptor
        return descriptor

    def _make_cdir_file_header(self, file_struct):
        """
        Create central directory file header
        """
        fields = {"signature": consts.CDFH_MAGIC,
                  "system": 0x03,  # 0x03 - unix
                  "version": self.__version,
                  "version_ndd": self.__version,
                  "flags": file_struct['flags'],
                  "compression": file_struct['cmpr_id'],
                  "mod_time": file_struct['mod_time'],
                  "mod_date": file_struct['mod_date'],
                  "uncomp_size": file_struct['size'],
                  "comp_size": file_struct['csize'],
                  "offset": file_struct['offset'],  # < file header offset
                  "crc": file_struct['crc'],
                  "fname_len": len(file_struct['fname']),
                  "extra_len": 0,
                  "fcomm_len": 0,  # comment length
                  "disk_start": 0,
                  "attrs_int": 0,
                  "attrs_ext": 0}
        cdfh = consts.CDLF_TUPLE(**fields)
        cdfh = consts.CDLF_STRUCT.pack(*cdfh)
        cdfh += file_struct['fname']
        return cdfh

    def _make_cdend(self):
        """
        make end of central directory record
        """
        fields = {"signature": consts.CD_END_MAGIC,
                  "disk_num": 0,
                  "disk_cdstart": 0,
                  "disk_entries": len(self.__files),
                  "total_entries": len(self.__files),
                  "cd_size": self.__cdir_size,
                  "cd_offset": self._offset_get(),
                  "comment_len": 0}
        cdend = consts.CD_END_TUPLE(**fields)
        cdend = consts.CD_END_STRUCT.pack(*cdend)
        return cdend

    def _make_end_structures(self):
        """
        cdir and cdend structures are saved at the end of zip file
        """
        # stream central directory entries
        # (plain iteration; the index the original enumerated was unused)
        for file_struct in self.__files:
            chunk = self._make_cdir_file_header(file_struct)
            self.__cdir_size += len(chunk)
            yield chunk
        # stream end of central directory
        yield self._make_cdend()

    def _offset_add(self, value):
        # advance the running byte offset of the archive
        self.__offset += value

    def _offset_get(self):
        # current byte offset inside the archive being streamed
        return self.__offset

    def _add_file_to_cdir(self, file_struct):
        # remember the file for the central directory written at the end
        self.__files.append(file_struct)

    def _cleanup(self):
        """
        Clean all structs after streaming
        """
        self.__files = []
        self.__cdir_size = self.__offset = 0
class ZipStream(ZipBase):
    """Generator-based zip writer: iterate ``stream()`` to receive the
    complete archive as a sequence of byte chunks."""

    def data_generator(self, src, src_type):
        """Yield file content: from an iterable of chunks ('s') or by
        reading a filesystem path ('f') in ``self.chunksize`` blocks."""
        if src_type == 's':
            for piece in src:
                yield piece
        elif src_type == 'f':
            with open(src, "rb") as handle:
                while True:
                    block = handle.read(self.chunksize)
                    if not block:
                        break
                    yield block

    def _stream_single_file(self, file_struct):
        """
        stream single zip file with header and descriptor at the end
        """
        yield self._make_local_file_header(file_struct)
        processor = Processor(file_struct)
        for raw in self.data_generator(file_struct['src'], file_struct['stype']):
            cooked = processor.process(raw)
            if cooked:
                yield cooked
        trailer = processor.tail()
        if trailer:
            yield trailer
        yield self._make_data_descriptor(file_struct, *processor.state())

    def stream(self):
        """
        Stream complete archive: every file record followed by the
        central-directory structures, tracking byte offsets throughout.
        """
        for source in self._source_of_files:
            file_struct = self._create_file_struct(source)
            # record where this file's local header starts in the archive
            file_struct['offset'] = self._offset_get()
            self._add_file_to_cdir(file_struct)
            for chunk in self._stream_single_file(file_struct):
                self._offset_add(len(chunk))
                yield chunk
        # stream zip structures
        for chunk in self._make_end_structures():
            yield chunk
        self._cleanup()
| {
"content_hash": "cb98a535acf8f2514858e8e4fb35f36d",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 92,
"avg_line_length": 34.864686468646866,
"alnum_prop": 0.5196895115486558,
"repo_name": "m2ozg/zipstream",
"id": "3bf9515fc19bee3b94e9f417e490fee2c2586105",
"size": "10715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipstream/zipstream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14279"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget.api.app import base_message
from flexget.utils import json
class TestFormatChecker(object):
    """Exercise the /format_check/ endpoint.

    Each format type is posted once with a valid payload (expects HTTP 200)
    and once with an invalid payload (expects HTTP 422); every response body
    must match the base_message schema.  The original file repeated the same
    five-line request/assert sequence ~20 times; it is factored into the
    `_check` helper below.
    """

    config = 'tasks: {}'

    def _check(self, api_client, schema_match, payload, expected_status):
        """POST `payload` to /format_check/, assert the HTTP status code and
        that the response matches the base_message schema."""
        rsp = api_client.json_post('/format_check/', data=json.dumps(payload))
        assert rsp.status_code == expected_status, (
            'Response code is %s' % rsp.status_code)
        data = json.loads(rsp.get_data(as_text=True))
        errors = schema_match(base_message, data)
        assert not errors

    def test_quality(self, api_client, schema_match):
        self._check(api_client, schema_match, {'quality': '720p'}, 200)
        self._check(api_client, schema_match, {'quality': '720p-1080p'}, 422)

    def test_quality_req(self, api_client, schema_match):
        self._check(api_client, schema_match,
                    {'quality_requirements': '720p-1080p'}, 200)
        # NOTE(review): the original invalid payload uses the 'quality' key,
        # not 'quality_requirements' -- kept as-is; confirm intent.
        self._check(api_client, schema_match, {'quality': 'bla'}, 422)

    def test_time(self, api_client, schema_match):
        self._check(api_client, schema_match, {'time': '10:00'}, 200)
        self._check(api_client, schema_match, {'time': 'bla'}, 422)

    def test_interval(self, api_client, schema_match):
        self._check(api_client, schema_match, {'interval': '1 day'}, 200)
        self._check(api_client, schema_match, {'interval': 'bla'}, 422)

    def test_percent(self, api_client, schema_match):
        self._check(api_client, schema_match, {'percent': '79%'}, 200)
        self._check(api_client, schema_match, {'percent': 'bla'}, 422)

    def test_size(self, api_client, schema_match):
        self._check(api_client, schema_match, {'size': '4GB'}, 200)
        # NOTE(review): the original invalid payload uses the 'percent' key,
        # not 'size' -- kept as-is; confirm intent.
        self._check(api_client, schema_match, {'percent': 'bla'}, 422)

    def test_regex(self, api_client, schema_match):
        self._check(api_client, schema_match, {'regex': 'bla'}, 200)
        self._check(api_client, schema_match, {'regex': '(('}, 422)

    def test_file(self, api_client, schema_match):
        self._check(api_client, schema_match,
                    {'file': 'test_format_checker_api.py'}, 200)
        self._check(api_client, schema_match, {'file': 'bla'}, 422)

    def test_path(self, api_client, schema_match):
        self._check(api_client, schema_match, {'path': '../api_tests'}, 200)
        self._check(api_client, schema_match, {'path': 'bla'}, 422)

    def test_url(self, api_client, schema_match):
        self._check(api_client, schema_match, {'url': 'http://google.com'}, 200)
        self._check(api_client, schema_match, {'url': 'bla'}, 422)

    def test_episode_identifier(self, api_client, schema_match):
        self._check(api_client, schema_match,
                    {'episode_identifier': 's01e01'}, 200)
        self._check(api_client, schema_match,
                    {'episode_identifier': 'bla'}, 422)
| {
"content_hash": "5c73833a0848a8cf7ed6f8b460c68490",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 79,
"avg_line_length": 37.44292237442922,
"alnum_prop": 0.618780487804878,
"repo_name": "jacobmetrick/Flexget",
"id": "509c43e2240c25019f60e515325095a2c5586425",
"size": "8200",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/tests/api_tests/test_format_checker_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "78933"
},
{
"name": "JavaScript",
"bytes": "261421"
},
{
"name": "Python",
"bytes": "3090372"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
} |
from urllib.parse import urlsplit
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import IntegrityError, transaction
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from connect.accounts.models import UserLink, UserSkill, LinkBrand
def save_paired_items(request, user, formset, Model,
                      item_name, counterpart_name):
    """
    Handle saving skills or links to the database.

    Builds a Model instance for every valid form that supplies both the
    item and its counterpart, then atomically replaces the user's existing
    rows with the new set.  Returns a redirect to the profile settings page
    on an IntegrityError, otherwise None.
    """
    new_instances = []
    for form in formset:
        if not form.is_valid():
            continue
        item = form.cleaned_data.get(item_name, None)
        counterpart = form.cleaned_data.get(counterpart_name, None)
        if item and counterpart:
            instance = Model(user=user)
            setattr(instance, item_name, item)
            setattr(instance, counterpart_name, counterpart)
            new_instances.append(instance)
    # Replace old pairs with new.  Run inside a transaction so the old
    # rows are never deleted without the new ones being saved.
    try:
        with transaction.atomic():
            Model.objects.filter(user=user).delete()
            Model.objects.bulk_create(new_instances)
    except IntegrityError:
        messages.error(request, _('There was an error updating your profile.'))
        return redirect(reverse('accounts:profile-settings'))
def save_skills(request, user, formset):
    """Wrapper function to save paired skills and proficiencies."""
    save_paired_items(request, user, formset,
                      UserSkill, 'skill', 'proficiency')
def save_links(request, user, formset):
    """Wrapper function to save paired link anchors and URLs."""
    save_paired_items(request, user, formset,
                      UserLink, 'anchor', 'url')
def match_link_to_brand(user_links):
    """
    Attempt to match a user's links to recognised brands (LinkBrand).

    This functionality also exists as a custom save() method on the model
    -- use this with functions that create and update in bulk.
    """
    for link in user_links:
        domain = urlsplit(link.url).netloc
        try:
            link.icon = LinkBrand.objects.get(domain=domain)
        except ObjectDoesNotExist:
            # no registered brand for this domain; leave the link untouched
            pass
        else:
            link.save()
    return user_links
| {
"content_hash": "8d8f43f00b767dd730ebf5db7328a9d9",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 33.901408450704224,
"alnum_prop": 0.6609887827170752,
"repo_name": "f3r3nc/connect",
"id": "9daf13963315e1797d6ac17a1069f3600288cb8c",
"size": "2407",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "connect/accounts/view_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "234365"
},
{
"name": "Cucumber",
"bytes": "32509"
},
{
"name": "HTML",
"bytes": "102248"
},
{
"name": "JavaScript",
"bytes": "8734"
},
{
"name": "Makefile",
"bytes": "906"
},
{
"name": "Python",
"bytes": "259283"
},
{
"name": "Ruby",
"bytes": "861"
}
],
"symlink_target": ""
} |
from datetime import datetime
from pyparsing import *
# An integer expression whose parse action converts the matched text
# to an int at parse time.
integer = Word(nums)

def convertToInt(t):
    """Parse action: turn the matched digit string into an int."""
    # Word(nums) guarantees t[0] contains only digits, so int() cannot fail.
    return int(t[0])

integer.setParseAction(convertToInt)
# or can be written as one line as
#integer = Word(nums).setParseAction(lambda t: int(t[0]))
# a pattern for a year/month/day date
date = integer('year') + '/' + integer('month') + '/' + integer('day')

def convertToDatetime(s, loc, tokens):
    """Parse action building a datetime from the matched date fields.

    Raises ParseException for impossible dates (e.g. month 13, or
    Feb 29 in a non-leap year).
    """
    try:
        # year, month and day were already converted to ints by the
        # parse action attached to the integer expression above
        return datetime(tokens.year, tokens.month, tokens.day)
    except Exception as ve:
        raise ParseException(
            s, loc,
            "'%d/%d/%d' is not a valid date, %s"
            % (tokens.year, tokens.month, tokens.day, ve))

date.setParseAction(convertToDatetime)
def test(s):
    """Parse `s` as a date and print either the result or the parse error."""
    try:
        print(date.parseString(s))
    except ParseException as pe:
        print(pe)

test("2000/1/1")
test("2000/13/1")  # invalid month
test("1900/2/29")  # 1900 was not a leap year
test("2000/2/29")  # but 2000 was
| {
"content_hash": "9fd7b4ee99262d405f474813c30a1450",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 70,
"avg_line_length": 33.1,
"alnum_prop": 0.6737160120845922,
"repo_name": "mollstam/UnrealPy",
"id": "ca3b7e39e132630e6e252fe22413d5a28e523cfb",
"size": "1535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pyparsing-2.0.3/examples/datetimeParseActions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from itertools import chain
import os
import sys
from fnmatch import fnmatch
from setuptools import setup
import versioneer
def ispackage(x):
    """True when `x` is a directory containing an __init__.py (a package)."""
    return os.path.isdir(x) and os.path.exists(os.path.join(x, '__init__.py'))


def istestdir(x):
    """True when `x` is a directory WITHOUT an __init__.py (a bare test dir)."""
    return os.path.isdir(x) and not os.path.exists(os.path.join(x, '__init__.py'))


def find_packages(where='blaze', exclude=('ez_setup', 'distribute_setup'),
                  predicate=ispackage):
    """Walk `where` and return every directory accepted by `predicate`
    whose path matches none of the `exclude` glob patterns."""
    if sys.version_info[0] == 3:
        # py2-only sources and bytecode caches are never packaged on py3
        exclude += ('*py2only*', '*__pycache__*')

    def keep(path):
        return predicate(path) and not any(fnmatch(path, pat)
                                           for pat in exclude)

    return [root for root, _dirs, _files in os.walk(where) if keep(root)]
# Concrete package and bare-test-directory lists, computed at import time.
# Relies on the working directory being the project root (where 'blaze' lives).
packages = find_packages()
testdirs = find_packages(predicate=(lambda x: istestdir(x) and
                                    os.path.basename(x) == 'tests'))
def find_data_files(exts, where='blaze'):
    """Yield every path under *where* whose filename matches a pattern in *exts*."""
    patterns = tuple(exts)
    for dirpath, _dirs, filenames in os.walk(where):
        for name in filenames:
            if any(fnmatch(name, pattern) for pattern in patterns):
                yield os.path.join(dirpath, name)
# Glob patterns of data files shipped inside the package.
exts = '*.h5', '*.tsv', '*.csv', '*.xls', '*.xlsx', '*.db', '*.json', '*.gz', '*.hdf5'
# setuptools wants package_data paths relative to the 'blaze' package root,
# so the leading 'blaze/' component is stripped from each entry.
package_data = [os.path.join(x.replace('blaze' + os.sep, ''), '*.py')
                for x in testdirs]
package_data += [x.replace('blaze' + os.sep, '')
                 for x in find_data_files(exts)]
def read(filename):
    """Return the full text of *filename* decoded as UTF-8.

    The encoding is specified explicitly so reading README.rst does not
    depend on the platform's default locale encoding (e.g. cp1252 on
    Windows, which breaks on non-ASCII characters).
    """
    # Local import keeps the fix self-contained; io.open accepts an
    # ``encoding`` argument on both Python 2 and 3.
    import io
    with io.open(filename, 'r', encoding='utf-8') as f:
        return f.read()
def read_reqs(filename):
    """Return the requirement lines of *filename*, outer whitespace stripped."""
    contents = read(filename)
    return contents.strip().splitlines()
def install_requires():
    """Base requirements, plus the Python-2-only extras when running on py2."""
    requirements = read_reqs('etc/requirements.txt')
    if sys.version_info[0] == 2:
        requirements = requirements + read_reqs('etc/requirements_py2.txt')
    return requirements
def extras_require():
    """Build the ``extras_require`` mapping from per-target requirement files.

    The 'sql' requirements are folded into 'mysql' and 'postgres'; an 'all'
    target aggregates everything except the 'ci' and 'test' targets.
    """
    targets = {'bcolz',
               'ci',
               'dask',
               'h5py',
               'mongo',
               'mysql',
               'numba',
               'postgres',
               'pyhive',
               'pytables',
               'server',
               'sql',
               'test'}
    extras = {}
    for target in targets:
        extras[target] = read_reqs('etc/requirements_%s.txt' % target)
    extras['mysql'] += extras['sql']
    extras['postgres'] += extras['sql']
    # don't include the 'ci' or 'test' targets in 'all'
    excluded = {'ci', 'test'}
    extras['all'] = [requirement
                     for key, requirements in extras.items()
                     if key not in excluded
                     for requirement in requirements]
    return extras
if __name__ == '__main__':
    # Requirements and data files are assembled by the helpers above;
    # versioneer derives the version string from source-control state.
    setup(name='blaze',
          version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass(),
          author='Continuum Analytics',
          author_email='blaze-dev@continuum.io',
          description='Blaze',
          long_description=read('README.rst'),
          install_requires=install_requires(),
          extras_require=extras_require(),
          license='BSD',
          classifiers=['Development Status :: 2 - Pre-Alpha',
                       'Environment :: Console',
                       'Intended Audience :: Developers',
                       'Intended Audience :: Science/Research',
                       'Intended Audience :: Education',
                       'License :: OSI Approved :: BSD License',
                       'Operating System :: OS Independent',
                       'Programming Language :: Python',
                       'Topic :: Scientific/Engineering',
                       'Topic :: Utilities'],
          entry_points={'console_scripts': ['blaze-server = blaze.server.spider:_main']},
          package_data={'blaze': package_data},
          packages=packages)
| {
"content_hash": "bd508da501e998225c2b825b33b5d383",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 89,
"avg_line_length": 33.42982456140351,
"alnum_prop": 0.5087903437418001,
"repo_name": "ContinuumIO/blaze",
"id": "236bc3625867fe20e3238a3039e14e59980b4aae",
"size": "3834",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "862729"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
} |
"""
Test running processes with the APIs in L{twisted.internet.utils}.
"""
import warnings, os, stat, sys, signal
from twisted.python.runtime import platform
from twisted.trial import unittest
from twisted.internet import error, reactor, utils, interfaces
class ProcessUtilsTests(unittest.TestCase):
    """
    Test running a process using L{getProcessOutput}, L{getProcessValue}, and
    L{getProcessOutputAndValue}.
    """
    # Skip the whole case when the installed reactor cannot spawn processes.
    if interfaces.IReactorProcess(reactor, None) is None:
        skip = "reactor doesn't implement IReactorProcess"
    output = None
    value = None
    # Child scripts run under the same interpreter that runs these tests.
    exe = sys.executable
    def makeSourceFile(self, sourceLines):
        """
        Write the given list of lines to a text file and return the absolute
        path to it.
        """
        script = self.mktemp()
        # NOTE(review): file() is the Python 2 builtin; this module predates
        # Python 3 support (see also the tuple-unpacking callback signatures
        # below, which are likewise Python-2-only syntax).
        scriptFile = file(script, 'wt')
        scriptFile.write(os.linesep.join(sourceLines) + os.linesep)
        scriptFile.close()
        return os.path.abspath(script)
    def test_output(self):
        """
        L{getProcessOutput} returns a L{Deferred} which fires with the complete
        output of the process it runs after that process exits.
        """
        scriptFile = self.makeSourceFile([
            "import sys",
            "for s in 'hello world\\n':",
            "    sys.stdout.write(s)",
            "    sys.stdout.flush()"])
        d = utils.getProcessOutput(self.exe, ['-u', scriptFile])
        return d.addCallback(self.assertEqual, "hello world\n")
    def test_outputWithErrorIgnored(self):
        """
        The L{Deferred} returned by L{getProcessOutput} is fired with an
        L{IOError} L{Failure} if the child process writes to stderr.
        """
        # make sure stderr raises an error normally
        scriptFile = self.makeSourceFile([
            'import sys',
            'sys.stderr.write("hello world\\n")'
            ])
        d = utils.getProcessOutput(self.exe, ['-u', scriptFile])
        d = self.assertFailure(d, IOError)
        def cbFailed(err):
            # The IOError failure carries a processEnded Deferred; the child
            # itself must still have exited cleanly.
            return self.assertFailure(err.processEnded, error.ProcessDone)
        d.addCallback(cbFailed)
        return d
    def test_outputWithErrorCollected(self):
        """
        If a C{True} value is supplied for the C{errortoo} parameter to
        L{getProcessOutput}, the returned L{Deferred} fires with the child's
        stderr output as well as its stdout output.
        """
        scriptFile = self.makeSourceFile([
            'import sys',
            # Write the same value to both because ordering isn't guaranteed so
            # this simplifies the test.
            'sys.stdout.write("foo")',
            'sys.stdout.flush()',
            'sys.stderr.write("foo")',
            'sys.stderr.flush()'])
        d = utils.getProcessOutput(self.exe, ['-u', scriptFile], errortoo=True)
        return d.addCallback(self.assertEqual, "foofoo")
    def test_value(self):
        """
        The L{Deferred} returned by L{getProcessValue} is fired with the exit
        status of the child process.
        """
        scriptFile = self.makeSourceFile(["raise SystemExit(1)"])
        d = utils.getProcessValue(self.exe, ['-u', scriptFile])
        return d.addCallback(self.assertEqual, 1)
    def test_outputAndValue(self):
        """
        The L{Deferred} returned by L{getProcessOutputAndValue} fires with a
        three-tuple, the elements of which give the data written to the child's
        stdout, the data written to the child's stderr, and the exit status of
        the child.
        """
        exe = sys.executable
        scriptFile = self.makeSourceFile([
            "import sys",
            "sys.stdout.write('hello world!\\n')",
            "sys.stderr.write('goodbye world!\\n')",
            "sys.exit(1)"
            ])
        def gotOutputAndValue((out, err, code)):
            self.assertEqual(out, "hello world!\n")
            # stderr is not opened in binary/unbuffered mode, so the child's
            # newline is translated to the platform line separator.
            self.assertEqual(err, "goodbye world!" + os.linesep)
            self.assertEqual(code, 1)
        d = utils.getProcessOutputAndValue(self.exe, ["-u", scriptFile])
        return d.addCallback(gotOutputAndValue)
    def test_outputSignal(self):
        """
        If the child process exits because of a signal, the L{Deferred}
        returned by L{getProcessOutputAndValue} fires a L{Failure} of a tuple
        containing the the child's stdout, stderr, and the signal which caused
        it to exit.
        """
        # Use SIGKILL here because it's guaranteed to be delivered. Using
        # SIGHUP might not work in, e.g., a buildbot slave run under the
        # 'nohup' command.
        scriptFile = self.makeSourceFile([
            "import sys, os, signal",
            "sys.stdout.write('stdout bytes\\n')",
            "sys.stderr.write('stderr bytes\\n')",
            "sys.stdout.flush()",
            "sys.stderr.flush()",
            "os.kill(os.getpid(), signal.SIGKILL)"])
        def gotOutputAndValue((out, err, sig)):
            self.assertEqual(out, "stdout bytes\n")
            self.assertEqual(err, "stderr bytes\n")
            self.assertEqual(sig, signal.SIGKILL)
        d = utils.getProcessOutputAndValue(self.exe, ['-u', scriptFile])
        # The signal-exit case is reported as a Failure wrapping a tuple.
        d = self.assertFailure(d, tuple)
        return d.addCallback(gotOutputAndValue)
    if platform.isWindows():
        test_outputSignal.skip = "Windows doesn't have real signals."
    def _pathTest(self, utilFunc, check):
        # Shared helper: run a child that reports its cwd, passing ``path``
        # explicitly, and let *check* verify the result against that dir.
        dir = os.path.abspath(self.mktemp())
        os.makedirs(dir)
        scriptFile = self.makeSourceFile([
            "import os, sys",
            "sys.stdout.write(os.getcwd())"])
        d = utilFunc(self.exe, ['-u', scriptFile], path=dir)
        d.addCallback(check, dir)
        return d
    def test_getProcessOutputPath(self):
        """
        L{getProcessOutput} runs the given command with the working directory
        given by the C{path} parameter.
        """
        return self._pathTest(utils.getProcessOutput, self.assertEqual)
    def test_getProcessValuePath(self):
        """
        L{getProcessValue} runs the given command with the working directory
        given by the C{path} parameter.
        """
        def check(result, ignored):
            self.assertEqual(result, 0)
        return self._pathTest(utils.getProcessValue, check)
    def test_getProcessOutputAndValuePath(self):
        """
        L{getProcessOutputAndValue} runs the given command with the working
        directory given by the C{path} parameter.
        """
        def check((out, err, status), dir):
            self.assertEqual(out, dir)
            self.assertEqual(status, 0)
        return self._pathTest(utils.getProcessOutputAndValue, check)
    def _defaultPathTest(self, utilFunc, check):
        # Shared helper: run the child with *no* ``path`` argument while the
        # parent's cwd has had all permissions removed, proving the utility
        # does not depend on an accessible working directory.
        # Make another directory to mess around with.
        dir = os.path.abspath(self.mktemp())
        os.makedirs(dir)
        scriptFile = self.makeSourceFile([
            "import os, sys, stat",
            # Fix the permissions so we can report the working directory.
            # On OS X (and maybe elsewhere), os.getcwd() fails with EACCES
            # if +x is missing from the working directory.
            "os.chmod(%r, stat.S_IXUSR)" % (dir,),
            "sys.stdout.write(os.getcwd())"])
        # Switch to it, but make sure we switch back
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(dir)
        # Get rid of all its permissions, but make sure they get cleaned up
        # later, because otherwise it might be hard to delete the trial
        # temporary directory.
        self.addCleanup(
            os.chmod, dir, stat.S_IMODE(os.stat('.').st_mode))
        os.chmod(dir, 0)
        d = utilFunc(self.exe, ['-u', scriptFile])
        d.addCallback(check, dir)
        return d
    def test_getProcessOutputDefaultPath(self):
        """
        If no value is supplied for the C{path} parameter, L{getProcessOutput}
        runs the given command in the same working directory as the parent
        process and succeeds even if the current working directory is not
        accessible.
        """
        return self._defaultPathTest(utils.getProcessOutput, self.assertEqual)
    def test_getProcessValueDefaultPath(self):
        """
        If no value is supplied for the C{path} parameter, L{getProcessValue}
        runs the given command in the same working directory as the parent
        process and succeeds even if the current working directory is not
        accessible.
        """
        def check(result, ignored):
            self.assertEqual(result, 0)
        return self._defaultPathTest(utils.getProcessValue, check)
    def test_getProcessOutputAndValueDefaultPath(self):
        """
        If no value is supplied for the C{path} parameter,
        L{getProcessOutputAndValue} runs the given command in the same working
        directory as the parent process and succeeds even if the current
        working directory is not accessible.
        """
        def check((out, err, status), dir):
            self.assertEqual(out, dir)
            self.assertEqual(status, 0)
        return self._defaultPathTest(
            utils.getProcessOutputAndValue, check)
class WarningSuppression(unittest.TestCase):
    """Tests for L{utils.suppressWarnings}."""

    def setUp(self):
        # Capture emitted warnings by swapping out warnings.showwarning.
        self.warnings = []
        self.originalshow = warnings.showwarning
        warnings.showwarning = self.showwarning

    def tearDown(self):
        # Restore the interpreter-wide warning hook.
        warnings.showwarning = self.originalshow

    def showwarning(self, *a, **kw):
        # Recording replacement for warnings.showwarning.
        self.warnings.append((a, kw))

    def testSuppressWarnings(self):
        """
        A function wrapped with L{utils.suppressWarnings} does not emit the
        warnings matched by the supplied filters, while unmatched warnings
        still come through.
        """
        def emit(msg):
            warnings.warn(msg)
        wrapped = utils.suppressWarnings(
            emit, (('ignore',), dict(message="This is message")))

        # Start off with a sanity check - calling the original function
        # should emit the warning.
        emit("Sanity check message")
        self.assertEqual(len(self.warnings), 1)

        # Now that that's out of the way, call the wrapped function, and
        # make sure no new warnings show up.
        wrapped("This is message")
        self.assertEqual(len(self.warnings), 1)

        # Finally, emit another warning which should not be ignored, and
        # make sure it is not.
        wrapped("Unignored message")
        self.assertEqual(len(self.warnings), 2)
| {
"content_hash": "0cc2b3d03d4c37050daf7922c81911f1",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 85,
"avg_line_length": 35.2457337883959,
"alnum_prop": 0.6110196572092573,
"repo_name": "Varriount/Colliberation",
"id": "d89ab56774792a7bc01d3614f4e0e279bf8a254b",
"size": "10400",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "libs/twisted/test/test_iutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "509005"
},
{
"name": "D",
"bytes": "29"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "10503398"
},
{
"name": "Shell",
"bytes": "1512"
}
],
"symlink_target": ""
} |
from django import template
from django.conf import settings
from leaflets.models import Leaflet
from django.db.models import Count
register = template.Library()
@register.inclusion_tag('parties/ordered_list.html')
def party_list_by_count():
    """Render the ten parties with the highest leaflet count.

    Supplies ``MEDIA_URL`` and the ``parties`` queryset to the
    ``parties/ordered_list.html`` inclusion template.

    The original also computed an unused ``datetime(2010, 5, 1)`` value;
    that dead code (and its local import) has been removed.
    """
    # Local import avoids an import cycle between the app's models and
    # template tags at Django start-up.
    from parties.models import Party
    parties = Party.objects.order_by('-count').all()[0:10]
    return {'MEDIA_URL': settings.MEDIA_URL, 'parties': parties}
"content_hash": "b31907d8c58e1a36c877b5133f4e86db",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 32.733333333333334,
"alnum_prop": 0.7352342158859471,
"repo_name": "electionleaflets/electionleaflets",
"id": "08f6679223d16f91c196e676af4eec99db9eec78",
"size": "491",
"binary": false,
"copies": "2",
"ref": "refs/heads/django_1_7",
"path": "electionleaflets/apps/parties/templatetags/party_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17642"
},
{
"name": "JavaScript",
"bytes": "9551"
},
{
"name": "Python",
"bytes": "106088"
}
],
"symlink_target": ""
} |
from datetime import datetime
import entity
from .page import Page
class Employment(entity.Entity):
    """Employment instances represent Facebook users's work history."""

    employer = None
    """A :class:`Page` instance describing the employer."""

    position = None
    """A :class:`Page` instance describing the position."""

    started_at = None
    """A :class:`datetime <datetime.datetime>` instance describing when the employment begun."""

    ended_at = None
    """A :class:`datetime <datetime.datetime>` instance describing when the employment ended, or ``None`` if it hasn't."""

    def __init__(self, employer, position, started_at, ended_at):
        """Initialize from Graph API data.

        :param employer: dict describing the employer page.
        :param position: dict describing the position page.
        :param started_at: ``YYYY-MM`` string for when the employment began.
        :param ended_at: ``YYYY-MM`` string for when it ended, or a falsy
            value when the position is still held.
        """
        self.employer = Page(**employer)
        self.position = Page(**position)
        self.started_at = datetime.strptime(started_at, '%Y-%m')
        if ended_at:
            self.ended_at = datetime.strptime(ended_at, '%Y-%m')
        else:
            self.ended_at = None

    @property
    def is_employed(self):
        """A boolean describing whether the user currently holds this position."""
        # Bug fix: the original evaluated ``bool(self.ended_at)`` without
        # returning it (the property always yielded None), and that truth
        # value was inverted relative to the documented meaning -- an
        # employment with an end date is no longer current.
        return self.ended_at is None
| {
"content_hash": "f7c9978acee8f24ac3a9a91b878834af",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 122,
"avg_line_length": 31.485714285714284,
"alnum_prop": 0.6442831215970962,
"repo_name": "vyyvyyv/facebook",
"id": "2f349f405c8a686187ccdaf986054f8d475111ec",
"size": "1102",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "facebook/employment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "17"
},
{
"name": "Python",
"bytes": "21564"
},
{
"name": "Ruby",
"bytes": "277"
}
],
"symlink_target": ""
} |
from pymongo import MongoClient
import os
import json
import yaml
import shutil
import locale
from fwissr.fwissr import Fwissr
def setup_global_conf():
    """Populate the test environment: two JSON conf files, two MongoDB
    collections, and a main ``fwissr.json`` that registers all four sources
    (with a 5-second refresh period and one top-level inline key)."""
    # create additional file sources
    first = 'mouarf.lol.json'
    second = 'trop.mdr.json'
    create_tmp_conf_file(first, {
        'meu': 'ringue',
        'pa': {'pri': 'ka'},
    })
    create_tmp_conf_file(second, {
        'gein': 'gembre',
        'pa': {'ta': 'teu'},
    })
    # create additional mongodb sources
    create_tmp_mongo_col('roque.fort', {
        'bar': 'baz',
    })
    create_tmp_mongo_col('cam.en.bert', {
        'pim': {'pam': ['pom', 'pum']},
    })
    # create main conf file
    fwissr_conf = {
        'fwissr_sources': [
            {'filepath': tmp_conf_file(first)},
            {'filepath': tmp_conf_file(second), 'top_level': True},
            {'mongodb': tmp_mongo_db_uri(
                tmp_mongo_db()),
                'collection': 'roque.fort', 'top_level': True},
            {'mongodb': tmp_mongo_db_uri(
                tmp_mongo_db()),
                'collection': 'cam.en.bert'},
        ],
        'fwissr_refresh_period': 5,
        'foo': 'bar',
    }
    create_tmp_conf_file('fwissr.json', fwissr_conf)
def tmp_conf_dir():
    """Directory under /tmp holding the test-suite's configuration files."""
    return '/tmp/fwissr.spec'
def tmp_conf_file(filename):
    """Full path of *filename* inside the test configuration directory."""
    return '{0}/{1}'.format(tmp_conf_dir(), filename)
def create_tmp_conf_file(filename, conf):
    """Serialize *conf* into ``tmp_conf_dir()/filename`` as JSON or YAML.

    The target directory is created on demand and any pre-existing file of
    the same name is removed first.

    :raises Exception: for file extensions other than ``.json``/``.yml``.
    """
    conf_file_path = os.path.join(tmp_conf_dir(), filename)
    if os.path.exists(conf_file_path):
        os.unlink(conf_file_path)
    if not os.path.lexists(tmp_conf_dir()):
        os.makedirs(tmp_conf_dir())
    # Serialize before touching the file: the original opened the file
    # first and raised for unknown extensions, leaking an open, empty file.
    ext = os.path.splitext(filename)[1]
    if ext == ".json":
        payload = json.dumps(conf, sort_keys=True)
    elif ext == ".yml":
        payload = yaml.dump(conf)
    else:
        raise Exception("Unsupported conf file type", filename)
    # Context manager guarantees the handle is closed even if write fails.
    with open(conf_file_path, 'w') as f:
        f.write(payload)
def delete_tmp_conf_files():
    """Recursively remove the test conf dir, refusing to touch the real one."""
    real_conf_dir = os.path.abspath(Fwissr.DEFAULT_MAIN_CONF_PATH)
    if os.path.abspath(tmp_conf_dir()) == real_conf_dir:
        raise Exception(
            "Hey, don't delete all legal conf files !",
            tmp_conf_dir())
    # ignore_errors=True: a missing directory is fine.
    shutil.rmtree(tmp_conf_dir(), True)
def set_tmp_conf(conf_dir=tmp_conf_dir, user_conf_dir=""):
    # Point the global Fwissr configuration at the test directories.
    # NOTE(review): the default binds the *function* tmp_conf_dir, not its
    # return value, so callers using the default store a callable in
    # Fwissr.conf_dir -- confirm whether tmp_conf_dir() was intended.
    Fwissr.conf_dir = conf_dir
    Fwissr.user_conf_dir = user_conf_dir
def env_or(env_name, or_value):
    """Return the value of environment variable *env_name*, or *or_value* if unset.

    Uses the idiomatic ``os.environ.get`` instead of a membership test
    followed by a second lookup.
    """
    return os.environ.get(env_name, or_value)
def tmp_mongo_hostname():
    """Hostname of the MongoDB test server (MONGO_HOSTNAME env var or localhost)."""
    default_host = "localhost"
    return env_or("MONGO_HOSTNAME", default_host)
def tmp_mongo_port():
    """Port of the MongoDB test server as an int (MONGO_PORT env var or 27017).

    The original returned ``env_or(...)`` directly -- yielding a *string*
    whenever MONGO_PORT was set -- and left its integer-conversion branch
    unreachable after that ``return``.  Convert explicitly so callers always
    get an int.
    """
    return int(env_or("MONGO_PORT", 27017))
def tmp_mongo_db():
    """Name of the scratch MongoDB database used by the test-suite."""
    return 'fwissr_spec'
def tmp_mongo_db_uri(db=""):
    """MongoDB connection URI for the test server, optionally scoped to *db*."""
    host = tmp_mongo_hostname()
    port = tmp_mongo_port()
    return "mongodb://%s:%s/%s" % (host, port, db)
def mongo_connection():
    """Return a MongoClient connected to the test MongoDB server."""
    return MongoClient(tmp_mongo_db_uri())
def create_tmp_mongo_col(name, conf):
    """Create collection *name* in the test DB with one document per entry of
    *conf*, stored as ``{'_id': key, 'value': value}``.
    """
    col = mongo_connection()[tmp_mongo_db()].create_collection(name)
    # .items() instead of the Python-2-only .iteritems() keeps this helper
    # runnable on Python 3 as well (identical behavior on Python 2).
    for key, val in conf.items():
        col.insert({'_id': key, 'value': val})
def delete_tmp_mongo_db():
    """Drop the scratch test database entirely."""
    mongo_connection().drop_database(tmp_mongo_db())
| {
"content_hash": "11d16b738996a38e06fd514a93cf4422",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 24.386861313868614,
"alnum_prop": 0.5800658485483389,
"repo_name": "fotonauts/fwissr-python",
"id": "a74de3c0161d95f468cebfa31a383783873533ba",
"size": "3343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52123"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
} |
"""
FILE: sample_authentication.py
DESCRIPTION:
These samples demonstrate authenticating an attestation client instance and
an attestation administration client instance.
USAGE:
python sample_authentication.py
Set the environment variables with your own values before running the sample:
1) ATTESTATION_AAD_URL - the base URL for an attestation service instance in AAD mode.
2) ATTESTATION_ISOLATED_URL - the base URL for an attestation service instance in Isolated mode.
3) ATTESTATION_LOCATION_SHORT_NAME - the short name for the region in which the
sample should be run - used to interact with the shared endpoint for that
region.
4) ATTESTATION_TENANT_ID - Tenant Instance for authentication.
5) ATTESTATION_CLIENT_ID - Client identity for authentication.
6) ATTESTATION_CLIENT_SECRET - Secret used to identify the client.
Usage:
python sample_authentication_async.py
This sample demonstrates establishing a connection to the attestation service
using client secrets stored in environment variables.
To verify that the connection completed successfully, it also calls the
`get_openidmetadata` API on the client to retrieve the OpenID metadata discovery
document for the attestation service instance.
"""
import os
from dotenv import find_dotenv, load_dotenv
import base64
from sample_utils import write_banner
class AttestationClientCreateSamples(object):
    """Samples showing how to create AttestationClient instances for an
    AAD-mode provider and for the region's shared provider, verifying each
    connection by fetching the OpenID metadata discovery document."""
    def __init__(self):
        # Environment configuration may come from a .env file when present.
        load_dotenv(find_dotenv())
        self.aad_url = os.environ.get("ATTESTATION_AAD_URL")
        self.isolated_url = os.environ.get("ATTESTATION_ISOLATED_URL")
        if self.isolated_url:
            # Isolated-mode providers require the base64-encoded policy
            # signing certificate and key from the environment.
            self.isolated_certificate = base64.b64decode(
                os.getenv("ATTESTATION_ISOLATED_SIGNING_CERTIFICATE")
            )
            self.isolated_key = base64.b64decode(
                os.getenv("ATTESTATION_ISOLATED_SIGNING_KEY")
            )
        shared_short_name = os.getenv("ATTESTATION_LOCATION_SHORT_NAME")
        self.shared_url = "https://shared{}.{}.attest.azure.net".format(
            shared_short_name, shared_short_name
        )  # type: str
    def close(self):
        # Nothing to release; present for context-manager symmetry.
        pass
    def create_attestation_client_aad(self):
        """
        Instantiate an attestation client using client secrets.
        """
        write_banner("create_attestation_client_aad")
        # [START client_create]
        # Create azure-identity class
        from azure.identity import DefaultAzureCredential
        from azure.security.attestation import AttestationClient
        # And now create an AttestationClient.
        with AttestationClient(self.aad_url, DefaultAzureCredential()) as client:
            print("Retrieve OpenID metadata from: ", self.aad_url)
            openid_metadata = client.get_open_id_metadata()
            print(" Certificate URI: ", openid_metadata["jwks_uri"])
            print(" Issuer: ", openid_metadata["issuer"])
        # [END client_create]
    def create_attestation_client_shared(self):
        """
        Instantiate an attestation client using client secrets to access the shared attestation provider.
        """
        write_banner("create_attestation_client_shared")
        # [START sharedclient_create]
        from azure.identity import DefaultAzureCredential
        from azure.security.attestation import AttestationClient
        # NOTE(review): rebuilds the shared URL rather than reusing
        # self.shared_url -- kept self-contained so the snippet between the
        # START/END markers works standalone in rendered docs.
        shared_short_name = os.getenv("ATTESTATION_LOCATION_SHORT_NAME")
        shared_url = (
            "https://shared"
            + shared_short_name
            + "."
            + shared_short_name
            + ".attest.azure.net"
        )
        with AttestationClient(shared_url, DefaultAzureCredential()) as client:
            print("Retrieve OpenID metadata from: ", shared_url)
            openid_metadata = client.get_open_id_metadata()
            print(" Certificate URI: ", openid_metadata["jwks_uri"])
            print(" Issuer: ", openid_metadata["issuer"])
        # [END sharedclient_create]
    def __enter__(self):
        return self
    def __exit__(self, *exc_type):
        self.close()
if __name__ == "__main__":
    # Run both samples; the context manager guarantees close() is called.
    with AttestationClientCreateSamples() as sample:
        sample.create_attestation_client_aad()
        sample.create_attestation_client_shared()
| {
"content_hash": "21799f5eff6cb81f3d525464d806d76d",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 105,
"avg_line_length": 38.098214285714285,
"alnum_prop": 0.6693227091633466,
"repo_name": "Azure/azure-sdk-for-python",
"id": "158977a293111a087afe3495c46a64312b674699",
"size": "4595",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/attestation/azure-security-attestation/samples/sample_authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"Methods for creating, parsing, and dealing with XML and ElementTree objects."
import io
import sys
import six
import types
from six import StringIO
from io import BytesIO
from lxml import etree
# In case issues come up with XML generation/parsing
# make sure you have the ElementTree v1.2.7+ lib as
# well as lxml v3.0+
from ncclient import NCClientError
parser = etree.XMLParser(recover=False)
class XMLError(NCClientError):
    "Raised when an XML document or element fails this module's validation."
    pass
### Namespace-related
#: Base NETCONF namespace
BASE_NS_1_0 = "urn:ietf:params:xml:ns:netconf:base:1.0"
# NXOS_1_0
NXOS_1_0 = "http://www.cisco.com/nxos:1.0"
# NXOS_IF
NXOS_IF = "http://www.cisco.com/nxos:1.0:if_manager"
#: Namespace for Tail-f core data model
TAILF_AAA_1_1 = "http://tail-f.com/ns/aaa/1.1"
#: Namespace for Tail-f execd data model
TAILF_EXECD_1_1 = "http://tail-f.com/ns/execd/1.1"
#: Namespace for Cisco data model
CISCO_CPI_1_0 = "http://www.cisco.com/cpi_10/schema"
#: Namespace for Flowmon data model
FLOWMON_1_0 = "http://www.liberouter.org/ns/netopeer/flowmon/1.0"
#: Namespace for Juniper 9.6R4. Tested with Junos 9.6R4+
JUNIPER_1_1 = "http://xml.juniper.net/xnm/1.1/xnm"
#: Namespace for Huawei data model
HUAWEI_NS = "http://www.huawei.com/netconf/vrp"
#: Namespace for Huawei private
HW_PRIVATE_NS = "http://www.huawei.com/netconf/capability/base/1.0"
#: Namespace for H3C data model
H3C_DATA_1_0 = "http://www.h3c.com/netconf/data:1.0"
#: Namespace for H3C config model
H3C_CONFIG_1_0 = "http://www.h3c.com/netconf/config:1.0"
#: Namespace for H3C action model
H3C_ACTION_1_0 = "http://www.h3c.com/netconf/action:1.0"
#: Namespace for netconf monitoring
NETCONF_MONITORING_NS = "urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring"
#: Namespace for netconf notifications
NETCONF_NOTIFICATION_NS = "urn:ietf:params:xml:ns:netconf:notification:1.0"
#: Namespace for netconf with-defaults (RFC 6243)
NETCONF_WITH_DEFAULTS_NS = "urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults"
#
# etree.register_namespace is available on newer ElementTree/lxml; fall back
# to poking ElementTree's private prefix map on older versions.
try:
    register_namespace = etree.register_namespace
except AttributeError:
    def register_namespace(prefix, uri):
        from xml.etree import ElementTree
        # cElementTree uses ElementTree's _namespace_map, so that's ok
        ElementTree._namespace_map[uri] = prefix

# Pre-register friendly prefixes for the namespaces this module works with,
# so serialized documents use them instead of generated ns0/ns1 prefixes.
for (ns, pre) in six.iteritems({
        BASE_NS_1_0: 'nc',
        NETCONF_MONITORING_NS: 'ncm',
        NXOS_1_0: 'nxos',
        NXOS_IF: 'if',
        TAILF_AAA_1_1: 'aaa',
        TAILF_EXECD_1_1: 'execd',
        CISCO_CPI_1_0: 'cpi',
        FLOWMON_1_0: 'fm',
        JUNIPER_1_1: 'junos',
}):
    register_namespace(pre, ns)
def qualify(tag, ns=BASE_NS_1_0):
    """Qualify a *tag* name with a *namespace*, in :mod:`~xml.etree.ElementTree` fashion i.e. *{namespace}tagname*.

    Converted from a lambda so this docstring is actually attached to the
    callable; the original placed a bare string after the lambda assignment,
    which documents nothing at runtime.  Behavior and signature unchanged.
    """
    if ns is None:
        return tag
    return "{%s}%s" % (ns, tag)
def to_xml(ele, encoding="UTF-8", pretty_print=False):
    """Convert and return the XML for an *ele* (:class:`~xml.etree.ElementTree.Element`) with specified *encoding*.

    Guarantees the result starts with an XML declaration and is the native
    ``str`` type on both Python 2 and 3.
    """
    xml = etree.tostring(ele, encoding=encoding, pretty_print=pretty_print)
    # sys.version_info instead of the original lexicographic comparison of
    # sys.version against '3', which is a fragile way to detect the major
    # version.  lxml returns str on py2 and bytes on py3.
    if sys.version_info[0] < 3:
        if xml.startswith('<?xml'):
            return xml
        return '<?xml version="1.0" encoding="%s"?>%s' % (encoding, xml)
    text = xml.decode('UTF-8')
    if text.startswith('<?xml'):
        return text
    return '<?xml version="1.0" encoding="%s"?>%s' % (encoding, text)
def to_ele(x):
    """Convert and return the :class:`~xml.etree.ElementTree.Element` for the XML document *x*. If *x* is already an :class:`~xml.etree.ElementTree.Element` simply returns that."""
    if etree.iselement(x):
        return x
    # sys.version_info instead of the fragile lexicographic comparison of
    # sys.version against '3'.  On Python 3 the text is encoded to bytes
    # first, as lxml refuses str input carrying an encoding declaration.
    if sys.version_info[0] < 3:
        return etree.fromstring(x, parser=parser)
    return etree.fromstring(x.encode('UTF-8'), parser=parser)
def parse_root(raw):
    "Efficiently parses the root element of a *raw* XML document, returning a tuple of its qualified name and attribute dictionary."
    if sys.version < '3':
        fp = StringIO(raw)
    else:
        fp = BytesIO(raw.encode('UTF-8'))
    # iterparse with only 'start' events lets us return on the very first
    # (root) element without parsing the rest of the document.  Returns
    # None implicitly for a document with no elements.
    for event, element in etree.iterparse(fp, events=('start',)):
        return (element.tag, element.attrib)
def validated_element(x, tags=None, attrs=None):
    """Checks if the root element of an XML document or Element meets the supplied criteria.

    *tags* if specified is either a single allowable tag name or sequence of allowable alternatives

    *attrs* if specified is a sequence of required attributes, each of which may be a sequence of several allowable alternatives

    Raises :exc:`XMLError` if the requirements are not met.
    """
    ele = to_ele(x)
    if tags:
        # Normalise a single tag name into a one-element list.
        if isinstance(tags, (str, bytes)):
            tags = [tags]
        if ele.tag not in tags:
            raise XMLError("Element [%s] does not meet requirement" % ele.tag)
    if attrs:
        for req in attrs:
            if isinstance(req, (str, bytes)): req = [req]
            for alt in req:
                if alt in ele.attrib:
                    break
            else:
                # for/else: none of the allowable alternatives for this
                # required attribute was present on the element.
                raise XMLError("Element [%s] does not have required attributes" % ele.tag)
    return ele
XPATH_NAMESPACES = {
're':'http://exslt.org/regular-expressions'
}
class NCElement(object):
    """Wraps an RPC reply, stripped of namespaces, for convenient XPath and
    ElementPath querying."""
    def __init__(self, result, transform_reply):
        self.__result = result
        self.__transform_reply = transform_reply
        # transform_reply is either a callable applied to the reply's root
        # element, or an XSLT document (bytes) used to strip namespaces.
        if isinstance(transform_reply, types.FunctionType):
            self.__doc = self.__transform_reply(result._root)
        else:
            self.__doc = self.remove_namespaces(self.__result)
    def xpath(self, expression):
        """
        return result for a call to lxml xpath()
        output will be a list
        """
        self.__expression = expression
        # The EXSLT regular-expressions namespace is made available as 're:'.
        self.__namespaces = XPATH_NAMESPACES
        return self.__doc.xpath(self.__expression, namespaces=self.__namespaces)
    def find(self, expression):
        """return result for a call to lxml ElementPath find()"""
        self.__expression = expression
        return self.__doc.find(self.__expression)
    def findtext(self, expression):
        """return result for a call to lxml ElementPath findtext()"""
        self.__expression = expression
        return self.__doc.findtext(self.__expression)
    def __str__(self):
        """syntactic sugar for str() - alias to tostring"""
        # tostring yields bytes on Python 3, so decode there.
        if sys.version<'3':
            return self.tostring
        else:
            return self.tostring.decode('UTF-8')
    @property
    def tostring(self):
        """return a pretty-printed string output for rpc reply"""
        # Re-parse with blank text stripped so pretty_print can reindent.
        parser = etree.XMLParser(remove_blank_text=True)
        outputtree = etree.XML(etree.tostring(self.__doc), parser)
        return etree.tostring(outputtree, pretty_print=True)
    @property
    def data_xml(self):
        """return an unmodified output for rpc reply"""
        return to_xml(self.__doc)
    def remove_namespaces(self, rpc_reply):
        """remove xmlns attributes from rpc reply"""
        self.__xslt=self.__transform_reply
        self.__parser = etree.XMLParser(remove_blank_text=True)
        self.__xslt_doc = etree.parse(io.BytesIO(self.__xslt), self.__parser)
        self.__transform = etree.XSLT(self.__xslt_doc)
        self.__root = etree.fromstring(str(self.__transform(etree.parse(StringIO(str(rpc_reply))))))
        return self.__root
# Element/SubElement construction shortcuts, with and without an explicit
# namespace.  NOTE(review): attrs={} is a shared mutable default; nothing
# here mutates it, but confirm lxml copies the mapping rather than holding
# a reference.
new_ele = lambda tag, attrs={}, **extra: etree.Element(qualify(tag), attrs, **extra)
new_ele_ns = lambda tag, ns, attrs={}, **extra: etree.Element(qualify(tag,ns), attrs, **extra)
sub_ele = lambda parent, tag, attrs={}, **extra: etree.SubElement(parent, qualify(tag), attrs, **extra)
sub_ele_ns = lambda parent, tag, ns, attrs={}, **extra: etree.SubElement(parent, qualify(tag, ns), attrs, **extra)
| {
"content_hash": "1415fe7f4a18ac7db8ffb99880a57b5a",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 176,
"avg_line_length": 37.87128712871287,
"alnum_prop": 0.6569934640522875,
"repo_name": "leopoul/ncclient",
"id": "91bc0ab3bb99d922561b13274c587b2b36020e4b",
"size": "8267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ncclient/xml_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "177964"
}
],
"symlink_target": ""
} |
import socket, struct, sys, types
from py_interface import erl_term
from pyubf_py_interface import to_py_interface, from_py_interface
from pyubf import Atom, Integer
class SocketError(Exception):
    """Raised for socket creation, connection, and data-transfer failures."""
    pass
class EBFError(Exception):
    """Raised for EBF protocol-level errors (bad framing or handshake)."""
    pass
class Socket:
    """Base holder for a TCP socket configured for low-latency messaging.

    Python 2 code ('except ..., msg' / 'raise Cls, msg' syntax).
    """
    def __init__(self,host=socket.gethostname(), port=7580):
        # Note: the host default is evaluated once, at import time.
        self.host = host
        self.port = port
        # NOTE(review): an exception *instance* stored on the object; the
        # code below raises the SocketError class directly instead, so this
        # attribute looks unused.
        self.SocketError = SocketError()
        try:
            self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            # TCP_NODELAY disables Nagle's algorithm so small frames are
            # transmitted immediately.
            self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        except socket.error, msg:
            raise SocketError, 'Error in Socket Object Creation!!'
    def close(self):
        """Close the underlying socket."""
        self.sock.close()
    def __str__(self):
        return 'Socket created on Host='+str(self.host)+',Port='+str(self.port)
class Client(Socket):
    """TCP client speaking a framing protocol where every message is
    prefixed with a 4-byte network-byte-order length header."""
    def connect(self, host=socket.gethostname(), port=7580, timeout=10):
        """Connect to host:port, applying *timeout* (seconds) to the socket."""
        self.host = host
        self.port = port
        self.sock.settimeout(timeout)
        try:
            self.sock.connect((self.host, self.port))
        except socket.error,msg:
            raise SocketError, 'Connection refused to '+str(self.host)+' on port '+str(self.port)
    def send(self, data):
        """Send *data* framed with a 4-byte big-endian length prefix."""
        size = socket.htonl(len(data))
        size = struct.pack("I", size)
        self.sock.send(size)
        # sendall returns None on success and raises on failure, so this
        # guard is effectively a belt-and-braces check.
        sent = self.sock.sendall(data)
        if sent != None:
            raise SocketError,'Connection broken to '+str(self.host)+' on port '+str(self.port)
    def recv(self):
        """Receive one length-prefixed frame; returns None on a bad header."""
        size = struct.calcsize("I")
        # Refuse to run on a platform where the 'I' format is not 4 bytes,
        # since the wire format assumes a 4-byte header.
        if size != 4:
            raise EBFError,'Bad "I" size '+str(size)
        # NOTE(review): a single recv() may legally return fewer than 4
        # bytes; a short read here would mis-parse the length header.
        size = self.sock.recv(size)
        try:
            size = socket.ntohl(struct.unpack("I", size)[0])
        except struct.error, e:
            return None
        # Loop until the whole payload has arrived; recv may return partial
        # chunks, and an empty chunk means the peer closed the connection.
        data = ""
        while len(data) < size:
            chunk = self.sock.recv(size - len(data))
            if chunk == '':
                raise SocketError, 'Connection broken to '+str(self.host)+' on port '+str(self.port)
            data = data + chunk
        return data
    def __str__(self):
        return 'Client connected to Host=' + str(self.host) + ',Port=' + str(self.port)
class EBF(Client):
    """UBF(EBF) protocol client: login handshake plus synchronous RPC.

    Erlang terms are built and inspected through py_interface's erl_term.
    """
    ## TODO: add other primitive types as needed
    module = None
    timeout = None

    # ---- Erlang term constructors ----
    def atom(self, v):
        return erl_term.ErlAtom(v)

    def binary(self, v):
        return erl_term.ErlBinary(v)

    def string(self, v=""):
        return erl_term.ErlString(v)

    def ubfstring(self, v=""):
        # A UBF string is the tagged tuple {'#S', Text}.
        return self.tuple([self.atom('#S'), self.string(v)])

    def tuple(self, v=[]):
        return erl_term.ErlTuple(v)

    def list(self, v=[]):
        return erl_term.ErlList(v)

    # ---- Erlang term predicates; optional |v| additionally checks the value ----
    def is_atom(self, t, v=None):
        return erl_term.IsErlAtom(t) and (v is None or str(t)==v)

    def is_binary(self, t, v=None):
        return erl_term.IsErlBinary(t) and (v is None or str(t)==v)

    def is_string(self, t, v=None):
        return type(t)==types.StringType and (v is None or t==v)

    def is_ubfstring(self, t, v=None):
        return self.is_tuple(t, 2) and self.is_atom(t[0], '#S') and self.is_string(t[1], v)

    def is_tuple(self, t, v=None):
        return type(t)==types.TupleType and (v is None or len(t)==v)

    def is_list(self, t, v=None):
        return type(t)==types.ListType and (v is None or len(t)==v)

    def login(self, module, meta_server, host=socket.gethostname(), port=7580, timeout=10):
        """Connect, verify the server hello, and start a session for |module|."""
        self.connect(host, port, timeout)
        self.module = module
        self.timeout = timeout
        # read response - hello: expects {'ebf1.0', MetaServerName, _}
        term = erl_term.BinaryToTerm(self.recv())
        if not self.is_tuple(term, 3):
            raise EBFError, term
        if not self.is_atom(term[0], 'ebf1.0'):
            raise EBFError, (term[0], term)
        if not self.is_ubfstring(term[1], meta_server):
            raise EBFError, (term[1], term)
        # ignore term[2]
        # write request - start session
        self.send(erl_term.TermToBinary(self.tuple([self.atom('startSession'), self.ubfstring(module), self.list()])))
        # read response - start session: expects {{ok, ok}, none}
        term = erl_term.BinaryToTerm(self.recv())
        if not self.is_tuple(term, 2):
            raise EBFError, term
        if not self.is_tuple(term[0], 2):
            raise EBFError, (term[0], term)
        if not self.is_atom(term[0][0], 'ok'):
            raise EBFError, (term[0][0], term)
        if not self.is_atom(term[0][1], 'ok'):
            raise EBFError, (term[0][1], term)
        if not self.is_atom(term[1], 'none'):
            raise EBFError, (term[1], term)

    ### rpc
    def rpc(self, module, request, maxsize=None, writetimeout=None, readtimeout=None):
        """Send |request| and return the decoded reply's first element.

        Raises EBFError when either side reports a broken contract or the
        reply is not a 2-tuple.
        """
        # TODO: implement maxsize
        if not self.module==module:
            raise EBFError, (module, self.module)
        # write request
        if writetimeout is None:
            self.sock.settimeout(self.timeout)
        else:
            self.sock.settimeout(writetimeout)
        self.send(erl_term.TermToBinary(to_py_interface(request)))
        # read response
        if readtimeout is None:
            self.sock.settimeout(self.timeout)
        else:
            self.sock.settimeout(readtimeout)
        term = erl_term.BinaryToTerm(self.recv())
        # check for client broke contract
        if self.is_tuple(term, 3) and self.is_atom(term[0], 'clientBrokeContract'):
            raise EBFError, term
        # check for server broke contract
        if self.is_tuple(term, 3) and self.is_atom(term[0], 'serverBrokeContract'):
            raise EBFError, term
        # check for malformed reply; must be a {Reply, State} 2-tuple
        if not self.is_tuple(term, 2):
            raise EBFError, term
        self.sock.settimeout(self.timeout)
        return from_py_interface(term[0])
if __name__ == "__main__":
    # Smoke test against a local gdss server: exercises delete/get/add/set
    # through the EBF RPC interface and checks the expected outcomes.
    ebf = EBF()
    ## login
    ebf.login('gdss', 'gdss_meta_server')
    ## setup: remove any stale 'foo' key so the test is repeatable
    req0 = (Atom('do'), Atom('tab1'), [(Atom('delete'), 'foo', [])], [], 1000)
    res0 = ebf.rpc('gdss', req0)
    ## get - ng: key must be absent after the delete
    req1 = (Atom('do'), Atom('tab1'), [(Atom('get'), 'foo', [])], [], 1000)
    res1 = ebf.rpc('gdss', req1)
    assert res1[0] == 'key_not_exist'
    ## add - ok
    req2 = (Atom('do'), Atom('tab1'), [(Atom('add'), 'foo', 1, 'bar', 0, [])], [], 1000)
    res2 = ebf.rpc('gdss', req2)
    assert res2[0] == 'ok'
    ## add - ng: second add of the same key must fail with key_exists
    req3 = (Atom('do'), Atom('tab1'), [(Atom('add'), 'foo', 1, 'bar', 0, [])], [], 1000)
    res3 = ebf.rpc('gdss', req3)
    assert res3[0][0] == 'key_exists'
    assert res3[0][1] == 1
    ## get - ok
    req4 = (Atom('do'), Atom('tab1'), [(Atom('get'), 'foo', [])], [], 1000)
    res4 = ebf.rpc('gdss', req4)
    assert res4[0][0] == 'ok'
    assert res4[0][1] == 1
    assert res4[0][2] == 'bar'
    ## set - ok: overwrite with a new timestamp/value
    req5 = (Atom('do'), Atom('tab1'), [(Atom('set'), 'foo', 2, 'baz', 0, [])], [], 1000)
    res5 = ebf.rpc('gdss', req5)
    assert res5[0] == 'ok'
    ## get - ok: must observe the overwritten value
    req6 = (Atom('do'), Atom('tab1'), [(Atom('get'), 'foo', [])], [], 1000)
    res6 = ebf.rpc('gdss', req6)
    assert res6[0][0] == 'ok'
    assert res6[0][1] == 2
    assert res6[0][2] == 'baz'
| {
"content_hash": "504e82913d81388f2d57f36c37eef627",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 118,
"avg_line_length": 32.37556561085973,
"alnum_prop": 0.5618448637316562,
"repo_name": "ubf/ubf",
"id": "2bc5bebe325b1498911f2f0cfb2b18c9100fa390",
"size": "7220",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "priv/python/pyebf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "355533"
},
{
"name": "Java",
"bytes": "23835"
},
{
"name": "Makefile",
"bytes": "2231"
},
{
"name": "Python",
"bytes": "15692"
},
{
"name": "Shell",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""Defines protocol specifications and messages."""
import importlib
import logging
from google.protobuf import message
from google.protobuf import reflection
import stl.base # pylint: disable=g-bad-import-order
import stl.lib # pylint: disable=g-bad-import-order
import stl.levenshtein # pylint: disable=g-bad-import-order
class Message(stl.base.NamedObject):
  """Protocol specifications.

  A protocol specification is defined with "message" or "message[]" keywords.
  It allows nested messages. For example,

  message mProtocolExample {
    encode "json";
    required string field1;
    optional integer field2;
    repeated mNestedMessage field3;

    message mNestedMessage {
      optional string fieldInNestedMessage;
    }
  }

  Attributes:
    encode_name: Name for the encoding to use for this message (e.g. "json").
    encoding: An stl.lib.Encoding object which will be used to serialize and
        deserialize MessageValues.
    is_array: Whether this message is an array.
    fields: List of fields (stl.base.Field) defined in this protocol or message.
    messages: Map of nested messages and their name.
  """

  def __init__(self, name, encode_name, is_array):
    stl.base.NamedObject.__init__(self, name)
    self.encode_name = encode_name
    # Encode name might not be provided if this is a sub-message.
    if self.encode_name:
      module, encoding = encode_name.rsplit('.', 1)
      self.encoding = importlib.import_module(module).__getattribute__(
          encoding)()
    self.is_array = is_array
    self.fields = []
    self.messages = {}

  def __eq__(self, other):
    return (stl.base.NamedObject.__eq__(self, other) and
            self.encode_name == other.encode_name and
            self.is_array == other.is_array and self.fields == other.fields and
            self.messages == other.messages)

  def __str__(self):
    if self.is_array:
      pattern = 'MESSAGE(%s)[] %s: f(%s) m(%s)'
    else:
      pattern = 'MESSAGE(%s) %s: f(%s) m(%s)'
    return pattern % (self.encode_name, self.name, stl.base.GetCSV(self.fields),
                      stl.base.GetCSV(self.messages))

  def Resolve(self, env, resolved_fields):
    """Build a MessageValue from |resolved_fields|, validating field types."""
    logging.log(1, 'Resolving ' + self.name)
    msg_value = MessageValue(self.name, self)
    outer_messages = [env['_current_module'].messages]
    if self.is_array:
      msg_value.value_dict_or_array = self.ValidateArray(
          resolved_fields, outer_messages)
    else:
      msg_value.value_dict_or_array = self.ValidateDict(resolved_fields,
                                                        outer_messages)
    return msg_value

  def ValidateArray(self, array_value, outer_messages):
    """Validate each dictionary element of an array-typed message."""
    return [
        self.ValidateDict(dict_value, outer_messages)
        for dict_value in array_value
    ]

  def ValidateDict(self, dict_value, outer_messages):
    """Validate a dictionary value.

    It checks whether all individual fields of |dict_value| are valid, i.e.
    all required fields exist and the values of fields correspond to their
    types.

    Args:
      dict_value: Dictionary value to validate.
      outer_messages: Messages visible from the scope of |dict_value|.

    Returns:
      Dictionary value validated.

    Raises:
      NameError: If any required fields are missed.
    """
    valid_dict = {}
    for f in self.fields:
      if f.name in dict_value:
        valid_dict[f.name] = self._ValidateField(f, dict_value[f.name],
                                                 outer_messages)
      elif not f.optional:
        # Fixed typo in the error message (was "Mandatoray").
        raise NameError("Mandatory field missing in message '%s': %s" %
                        (self.name, f.name))
    return valid_dict

  def _ValidateField(self, field, value, outer_messages):
    """Validate a field of dictionary value according to its type."""
    if field.repeated:  # Array
      if not isinstance(value, list):
        raise ValueError("Value list expected in field '%s' in message '%s'" %
                         (field.name, self.name))
      temp_field = stl.base.Field(field.name,
                                  field.type_)  # clear repeated flag
      return [self._ValidateField(temp_field, e, outer_messages) for e in value]
    if field.type_ == 'bool':
      if (value is None or isinstance(value, bool) or
          (isinstance(value, stl.base.LocalVar) and value.type_ == 'bool') or
          Message._IsValidFunc(value, 'bool')):
        return value
      raise ValueError("Boolean value expected in field '%s' in message '%s'" %
                       (field.name, self.name))
    if field.type_ == 'int':
      if (value is None or isinstance(value, int) or
          (isinstance(value, stl.base.LocalVar) and value.type_ == 'int') or
          Message._IsValidFunc(value, 'int')):
        return value
      raise ValueError("Integer value expected in field '%s' in message '%s'" %
                       (field.name, self.name))
    if field.type_ == 'string':
      if (value is None or stl.base.IsString(value) or
          (isinstance(value, stl.base.LocalVar) and value.type_ == 'string') or
          Message._IsValidFunc(value, 'string')):
        return value
      if isinstance(value, MessageValue):  # Message value must be serialized.
        return value
      raise ValueError("String value expected in field '%s' in message '%s'" %
                       (field.name, self.name))
    # Sub-message or dictionary.
    sub_msg = None
    if field.type_ in self.messages:
      sub_msg = self.messages[field.type_]
    else:
      for m in outer_messages:
        if field.type_ in m:
          sub_msg = m[field.type_]
          break
    if not sub_msg:
      # Bug fixes: stl.base.Field exposes |type_| (not |type|), and the
      # original summed dict key views with a list start value
      # (TypeError on Python 3) over "outer_messages + self.messages"
      # (list + dict, also a TypeError). Collect all visible message
      # names explicitly instead.
      candidates = [name
                    for scope in outer_messages + [self.messages]
                    for name in scope]
      did_you_mean = stl.levenshtein.closest_candidate(field.type_, candidates)
      raise NameError('Cannot find a message: %s. Did you mean %s?' %
                      (field.type_, did_you_mean))
    if not isinstance(value, dict):
      raise ValueError("Struct value expected in field '%s' in message '%s'" %
                       (field.name, self.name))
    return sub_msg.ValidateDict(value, outer_messages + [self.messages])

  @staticmethod
  def _IsValidFunc(value, type_):
    """Whether or not a function |value| is compatible with |type_|."""
    if (isinstance(value, stl.base.FuncGetField) and
        isinstance(value.obj, stl.base.Role) and
        value.obj.fields[value.field].type_ == type_):
      return True
    if isinstance(value, stl.base.FuncSet):
      if isinstance(value.obj, stl.base.LocalVar) and value.obj.type_ == type_:
        return True
      if (isinstance(value.obj, stl.base.Role) and
          value.obj.fields[value.field].type_ == type_):
        return True
    if isinstance(
        value, stl.base.QualifierValue.Resolved) and value.qual_type == type_:
      return True
    # Return False (not None) so the predicate is consistently boolean.
    return False
class MessageFromExternal(Message):
  """A protocol specification backed by an external protobuf message type.

  Example STL syntax:

  message mProtocolExample {
    encode "json";
    external "external.class.Name";
  }

  Attributes:
    external: Generated protobuf message class for this specification.
    descriptor: google.protobuf.descriptor.Descriptor to generate self.external.
  """

  def __init__(self, name, encode_name, is_array, external):
    Message.__init__(self, name, encode_name, is_array)
    # Import external message type if it is passed as a string.
    if stl.base.IsString(external):
      module, message_type = external.rsplit('.', 1)
      self.descriptor = importlib.import_module(module).__getattribute__(
          message_type).DESCRIPTOR
    # Otherwise, a descriptor should be passed.
    else:
      self.descriptor = external
    self.external = MessageFromExternal._MakeClass(self.descriptor)
    self._ExtractFieldsFromDesc(self.descriptor)

  def __str__(self):
    return '%s b(%s)' % (Message.__str__(self), self.descriptor.name)

  def _ExtractFieldsFromDesc(self, desc):
    # Mirror the protobuf descriptor's fields as stl.base.Field entries;
    # message-typed fields become nested MessageFromExternal specs keyed by
    # the field name.
    for f in desc.fields:
      if f.type == f.TYPE_MESSAGE:
        if f.name not in self.messages:
          self.messages[f.name] = MessageFromExternal(f.name, self.encode_name,
                                                      False, f.message_type)
        type_ = f.name
      else:
        type_ = MessageFromExternal._GetFieldType(f)
      field = stl.base.Field(f.name, type_, f.label == f.LABEL_OPTIONAL,
                             f.label == f.LABEL_REPEATED)
      self.fields.append(field)

  def __deepcopy__(self, memo):
    # Rebuild from the same descriptor instead of copying protobuf internals.
    return MessageFromExternal(self.name, self.encode_name, self.is_array,
                               self.descriptor)

  @staticmethod
  def _GetFieldType(f):
    """Return the STL type name ('bool'/'string'/'int') for protobuf field |f|."""
    if f.type == f.TYPE_BOOL:
      return 'bool'
    if f.type == f.TYPE_STRING:
      return 'string'
    if (f.type == f.TYPE_ENUM or f.type == f.TYPE_FIXED32 or
        f.type == f.TYPE_FIXED64 or f.type == f.TYPE_INT32 or
        f.type == f.TYPE_INT64 or f.type == f.TYPE_SFIXED32 or
        f.type == f.TYPE_SFIXED64 or f.type == f.TYPE_SINT32 or
        f.type == f.TYPE_SINT64 or f.type == f.TYPE_UINT32 or
        f.type == f.TYPE_UINT64):
      return 'int'
    # TODO(byungchul): Need to support more types
    # raise NotImplementedError('Not supported protobuf type: ' + f.name)
    return 'int'

  @staticmethod
  def _MakeClass(descriptor):
    """Utility function for generating an external class from a Descriptor."""
    attributes = {}
    for name, nested_type in descriptor.nested_types_by_name.items():
      attributes[name] = MessageFromExternal._MakeClass(nested_type)
    attributes[
        reflection.GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor  # pylint: disable=protected-access, line-too-long
    return reflection.GeneratedProtocolMessageType(
        str(descriptor.name), (message.Message,), attributes)
class MessageValue(stl.base.NamedObject):
  """Message instances.

  A message instance is expanded from a protocol spec, Message and values
  corresponding to fields.

  Attributes:
    msg: Protocol spec (message.Message)
    value_dict_or_array: Map (or array of maps) of protocol fields and values.
  """

  def __init__(self, name, msg):
    stl.base.NamedObject.__init__(self, name)
    self.msg = msg
    self.value_dict_or_array = None

  def __str__(self):
    return 'MESSAGE-VALUE %s: v(%s)' % (self.name,
                                        str(self.value_dict_or_array))

  def Encode(self):
    """Encode this message instance into actual data stream.

    The supported encoding methods are: json, protobuf, and user-defined
    encodings.

    Returns:
      A string encoded.
    """
    assert self.value_dict_or_array is not None
    logging.log(1, 'Encoding ' + self.name)
    resolved = MessageValue._ResolveVars(self.value_dict_or_array)
    logging.debug('Resolved: ' + str(resolved))
    return self.msg.encoding.SerializeToString(resolved, self.msg)

  def _EncodeToString(self):
    """Coerce to string type."""
    return self.Encode()

  def Match(self, encoded):
    """Whether or not |encoded| is compatible with this message instance.

    If |encoded| has all required fields, and values of all fields are same to
    those of this message instance, it is compatible. Otherwise, i.e
    1) it doesn't have some required fields
    2) it has some values of fields different from specified in |value_dict| of
       this message instance

    Args:
      encoded: A string expected to be encoded with same encoding method of
          this message instance.

    Returns:
      Whether or not |encoded| is compatible with this message instance.
    """
    logging.log(1, 'Decoding %s: %s', self.name, encoded)
    decoded = self.msg.encoding.ParseFromString(encoded, self.msg)
    logging.info('Matching message value:\nExpected: %s\nActual: %s\n',
                 self.value_dict_or_array, decoded)
    return MessageValue._MatchValue(self.value_dict_or_array, decoded)

  def _MatchFromString(self, encoded_string):
    # Used when a MessageValue appears as an expected string field.
    return self.Match(encoded_string)

  @staticmethod
  def _ResolveVars(value):
    """Resolve any variables or run any functions in |value|.

    Args:
      value: Value which may have variables or functions to resolve.

    Returns:
      Resolved value.

    Raises:
      ValueError: If a concrete value for |value| cannot be determined.
    """
    # NOTE: the order of isinstance checks below is significant; several of
    # the stl.base types are related, so do not reorder.
    if isinstance(value, dict):
      resolved_value = {}
      for k, v in value.items():
        resolved_value[k] = MessageValue._ResolveVars(v)
      return resolved_value
    if isinstance(value, list):
      return [MessageValue._ResolveVars(v) for v in value]
    if isinstance(value, stl.base.QualifierValue.Resolved):
      return value.Generate()
    if isinstance(value, stl.base.LocalVar):
      # Local var is initialized with random value.
      if value.value is None:
        raise ValueError("LocalVar '%s' does not have a value." % value.name)
      return value.value
    if isinstance(value, stl.base.Func):
      return value.Run()
    if isinstance(value, MessageValue):
      # type must be string. Coerce value to string.
      return value._EncodeToString()  # pylint: disable=protected-access
    return value

  @staticmethod
  def _MatchValue(expected, actual):
    """Whether or not |expected| is same value of |actual|.

    Args:
      expected: Expected value.
      actual: Actual value.

    Returns:
      True if:
      1) Type of |expected| and of |actual| must be same.
      2) If type of |expected| is dictionary or sub-message, all fields
         specified in |expected| must have same value in |actual|.
      3) If type of |expected| is array, all entries specified in |expected|
         must exist in |actual| in any order.
      4) If type of |expected| is either integer or string, |expected| must
         be same to |actual|.
    """
    if isinstance(expected, dict):
      if not isinstance(actual, dict):
        return False
      for k, v in expected.items():
        if k not in actual:
          logging.log(1, 'Not exist: field=' + k)
          return False
        if not MessageValue._MatchValue(v, actual[k]):
          logging.log(1, 'Different: field=%s, expected=%s, actual=%s', k, v,
                      actual[k])
          return False
      return True
    if isinstance(expected, list):
      if not isinstance(actual, list):
        return False
      # Each expected entry must match some actual entry, in any order.
      for e in expected:
        found = False
        for a in actual:
          if MessageValue._MatchValue(e, a):
            found = True
            break
        if not found:
          return False
      return True
    if isinstance(expected, stl.base.QualifierValue.Resolved):
      return expected.ValidateAndSet(actual)
    if isinstance(expected, stl.base.FuncSet):
      # TODO(byungchul): Type checking.
      # NOTE: SetValue has a side effect (it stores |actual|), so this
      # branch must run before the generic Func check below.
      expected.SetValue(actual)
      return True
    if isinstance(expected, stl.base.LocalVar):
      return expected.value == actual
    if isinstance(expected, stl.base.Func):
      return expected.Run() == actual
    if isinstance(expected, MessageValue):
      # type must be string.
      return expected._MatchFromString(actual)  # pylint: disable=protected-access
    return expected == actual
| {
"content_hash": "e525fbf08457b2fa29c4921f041ed44e",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 128,
"avg_line_length": 35.801886792452834,
"alnum_prop": 0.6357707509881423,
"repo_name": "google/sprockets",
"id": "dd8143bb5f7f9eab3a25665c5b764971cf862073",
"size": "15776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stl/message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "148"
},
{
"name": "Python",
"bytes": "201185"
},
{
"name": "Vim Script",
"bytes": "2816"
}
],
"symlink_target": ""
} |
'''
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
For "(()", the longest valid parentheses substring is "()", which has length = 2.
Another example is ")()())", where the longest valid parentheses substring is "()()", which has length = 4.
'''
class Solution:
    """Longest valid (well-formed) parentheses substring, two approaches."""

    # @param {string} s
    # @return {integer}
    def longestValidParentheses(self, s):
        """Two-pass scan: mark unmatched ')' then count pairs between marks.

        Bug fixes versus the original: leftover Python 2 debug ``print``
        statements removed (they were a SyntaxError on Python 3 and polluted
        stdout).
        """
        size = len(s)
        if size < 2:
            return 0
        max_len = 0
        count = 0
        result = 0
        marker = list(s)
        # Pass 1 (left to right): replace every unmatched ')' with '*'.
        for index in range(size):
            if marker[index] == '(':
                count += 1
            elif count > 0:
                count -= 1
            else:
                count = 0
                marker[index] = '*'
        count = 0
        # Pass 2 (right to left): count matched pairs within each segment
        # delimited by '*' markers or unmatched '(' characters.
        for index in range(size - 1, -1, -1):
            if marker[index] == '*':
                max_len = max(max_len, result)
                result = 0
                count = 0
            elif marker[index] == ')':
                count += 1
            else:
                if count > 0:
                    result += 1
                    count -= 1
                else:
                    # Unmatched '(': close off the current run.
                    max_len = max(max_len, result)
                    result = 0
                    count = 0
        max_len = max(max_len, result)
        # Each counted pair contributes two characters.
        return 2 * max_len

    def longestValidParenthesesDP(self, s):
        """
        :type s: str
        :rtype: int

        DP where dp[i] is the length of the longest valid substring ending
        at index i. Bug fixes versus the original: a stray ``b`` token before
        this docstring was a SyntaxError; the implicit dp[-1] wraparound when
        the match lands at index 0 is now an explicit guard; debug ``print``
        removed.
        """
        size = len(s)
        if size < 2:
            return 0
        dp = [0] * size
        for index in range(1, size):
            char = s[index]
            # Candidate position of the '(' paired with s[index].
            _index = index - 1 - dp[index - 1]
            if char == "(" or _index < 0 or s[_index] == ")":
                dp[index] = 0
            else:
                # s[_index]=='(' matches s[index]==')'; extend with any valid
                # run that ends immediately before the matching '('.
                dp[index] = 2 + dp[index - 1] + (dp[_index - 1] if _index >= 1 else 0)
        return max(dp)
if __name__ == '__main__':
    # Smoke test. print() call form works on both Python 2 and Python 3,
    # unlike the original Python 2-only print statement.
    solution = Solution()
    print(solution.longestValidParenthesesDP('(()))(()())'))
| {
"content_hash": "ebccd8561588bfe9ecf3c01bed7d282a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 132,
"avg_line_length": 29.753623188405797,
"alnum_prop": 0.4378957622990745,
"repo_name": "shub0/algorithm-data-structure",
"id": "8b16054627e3fac2b6b317c3b303dee08be08198",
"size": "2073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/longest_valid_parenthesis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "166293"
},
{
"name": "Python",
"bytes": "487573"
}
],
"symlink_target": ""
} |
import webbrowser

# Open the W3C SVG 1.1 specification in the user's default browser.
webbrowser.open("http://www.w3.org/TR/SVG11/")
| {
"content_hash": "88cdc0ac3a8abb587183299177338228",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 46,
"avg_line_length": 32.5,
"alnum_prop": 0.7538461538461538,
"repo_name": "NirBenTalLab/proorigami-cde-package",
"id": "9ea131e4bd5b2d5022968fef15f61191407add8b",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cde-root/usr/local/apps/inkscape/share/inkscape/extensions/webbrowser_svgspec.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "16762"
},
{
"name": "Python",
"bytes": "4730244"
},
{
"name": "Shell",
"bytes": "9915"
}
],
"symlink_target": ""
} |
import configparser
import json
import logging
import os
import re
import tempfile
import time
import requests
from requests.auth import HTTPBasicAuth
# Compatibility imports: tolerate both standalone urllib3 and the copy
# vendored inside older requests releases, and both Python 2/3 http client
# module names.
try:
    import urllib3
except ImportError:
    from requests.packages import urllib3
try:
    import http.client as http_client
except ImportError:
    # Python 2
    import httplib as http_client
# http://stackoverflow.com/questions/10588644/how-can-i-see-the-entire-http-request-thats-being-sent-by-my-python-application
# Uncomment this to see requests and responses.
# TODO: We need better way and we should log requests and responses in
# log file.
# http_client.HTTPConnection.debuglevel = 1
# Appliances commonly use self-signed certificates; silence TLS warnings.
urllib3.disable_warnings()
# Maps a short resource-type name to its REST endpoint path; types not
# listed here use the restype (or an explicit endpoint) verbatim in
# build_url().
resource_to_endpoint = {
    'job': 'endeavour/job',
    'jobsession': 'endeavour/jobsession',
    'log': 'endeavour/log',
    'association': 'endeavour/association',
    'workflow': 'spec/storageprofile',
    'policy': 'endeavour/policy',
    'user': 'security/user',
    'resourcepool': 'security/resourcepool',
    'role': 'security/role',
    'identityuser': 'identity/user',
    'identitycredential': 'identity/user',
    'appserver': 'appserver',
    'oracle': 'application/oracle',
    'site': 'site',
}
# Maps a resource type to the JSON field that holds its collection in list
# responses; unlisted types default to "<restype>s" (see EcxAPI.__init__).
resource_to_listfield = {
    'identityuser': 'users',
    'identitycredential': 'users',
    'policy': 'policies',
    'ldap': 'ldapServers',
    'pure': 'purestorages',
    'workflow': 'storageprofiles',
    'resourcepool': 'resourcePools',
}
def build_url(baseurl, restype=None, resid=None, path=None, endpoint=None):
    """Assemble an API URL from its optional pieces.

    ``restype`` is translated through ``resource_to_endpoint``; when it has
    no (truthy) mapping, ``endpoint`` is used if given, else the raw
    ``restype``. ``resid`` becomes an extra path segment, and ``path`` is
    appended with a leading '/' ensured.
    """
    parts = [baseurl]
    if restype is not None:
        segment = resource_to_endpoint.get(restype)
        if not segment:
            segment = restype if endpoint is None else endpoint
        parts.append("/" + segment)
    if resid is not None:
        parts.append("/" + str(resid))
    if path is not None:
        parts.append(path if path.startswith('/') else '/' + path)
    return "".join(parts)
def raise_response_error(r, *args, **kwargs):
    # requests response hook: raise requests.HTTPError for 4xx/5xx replies.
    r.raise_for_status()
def pretty_print(data):
    """Log |data| as sorted, indented JSON and return the formatted string.

    The original returned ``logging.info``'s result, which is always ``None``;
    returning the rendered text keeps the logging behavior while making the
    helper usable in expressions as well.
    """
    text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
    logging.info(text)
    return text
def change_password(url, username, password, newpassword):
    """POST a password change for |username| using HTTP basic auth.

    Returns the raw requests.Response; certificate verification is disabled
    because appliances typically present self-signed certificates.
    """
    payload = {'newPassword': newpassword}
    session = requests.Session()
    session.verify = False
    session.headers.update({'Content-Type': 'application/json',
                            'Accept': 'application/json'})
    endpoint = "%s/api/endeavour/session?changePassword=true&screenInfo=1" % url
    return session.post(endpoint, json=payload,
                        auth=HTTPBasicAuth(username, password))
class EcxSession(object):
    """Authenticated REST session against an ECX appliance.

    Attributes:
        url: Base appliance URL (e.g. "https://host:8443").
        api_url: url + "/api"; all endpoints hang off this.
        sessionid: Value of the X-Endeavour-Sessionid header; obtained via
            login() when not supplied by the caller.
        conn: Shared requests.Session carrying auth headers and hooks.
    """

    def __init__(self, url, username=None, password=None, sessionid=None):
        self.url = url
        self.api_url = url + '/api'
        self.username = username
        self.password = password
        self.sessionid = sessionid
        self.conn = requests.Session()
        # Appliances commonly use self-signed certificates.
        self.conn.verify = False
        # Every non-2xx response raises requests.HTTPError via this hook.
        self.conn.hooks.update({'response': raise_response_error})
        if not self.sessionid:
            if self.username and self.password:
                self.login()
            else:
                raise Exception('Please provide login credentials.')
        self.conn.headers.update({'X-Endeavour-Sessionid': self.sessionid})
        self.conn.headers.update({'Content-Type': 'application/json'})
        self.conn.headers.update({'Accept': 'application/json'})

    def login(self):
        """Create a server-side session with basic auth and remember its id."""
        r = self.conn.post("%s/endeavour/session" % self.api_url, auth=HTTPBasicAuth(self.username, self.password))
        self.sessionid = r.json()['sessionid']

    def __repr__(self):
        return 'EcxSession: user: %s' % self.username

    def get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None):
        """GET and JSON-decode; an explicit |url| bypasses URL building."""
        if url is None:
            url = build_url(self.api_url, restype, resid, path, endpoint)
        return json.loads(self.conn.get(url, params=params).content)

    def stream_get(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None, outfile=None):
        """Download a file-producing endpoint to |outfile| and return its path.

        When |outfile| is None, the name from the Content-Disposition header
        is used inside a freshly created temp directory.
        """
        if url is None:
            url = build_url(self.api_url, restype, resid, path, endpoint)
        r = self.conn.get(url, params=params)
        logging.info("headers: %s" % r.headers)
        # The response header Content-Disposition contains default file name
        # Content-Disposition: attachment; filename=log_1490030341274.zip
        default_filename = re.findall('filename=(.+)', r.headers['Content-Disposition'])[0]
        if not outfile:
            if not default_filename:
                raise Exception("Couldn't get the file name to save the contents.")
            outfile = os.path.join(tempfile.mkdtemp(), default_filename)
        # Stream in 64 KiB chunks to avoid loading large logs into memory.
        with open(outfile, 'wb') as fd:
            for chunk in r.iter_content(chunk_size=64*1024):
                fd.write(chunk)
        return outfile

    def delete(self, restype=None, resid=None, path=None, params={}, endpoint=None, url=None):
        """DELETE; returns decoded JSON, or None when the body is empty."""
        if url is None:
            url = build_url(self.api_url, restype, resid, path, endpoint)
        resp = self.conn.delete(url, params=params)
        return json.loads(resp.content) if resp.content else None

    def post(self, restype=None, resid=None, path=None, data={}, params={}, endpoint=None, url=None):
        """POST |data| as JSON; returns decoded JSON, or {} on an empty body."""
        if url is None:
            url = build_url(self.api_url, restype, resid, path, endpoint)
        logging.info(json.dumps(data, indent=4))
        r = self.conn.post(url, json=data, params=params)
        if r.content:
            #return json.loads(r.content.decode('utf-8'))
            return r.json()
        return {}

    def put(self, restype=None, resid=None, path=None, data={}, params={}, endpoint=None, url=None):
        """PUT |data| as JSON; returns decoded JSON, or {} on an empty body."""
        if url is None:
            url = build_url(self.api_url, restype, resid, path, endpoint)
        logging.info(json.dumps(data, indent=4))
        r = self.conn.put(url, json=data, params=params)
        if r.content:
            return json.loads(r.content)
        return {}
class EcxAPI(object):
    """Generic CRUD helper binding an EcxSession to a single resource type.

    Subclasses pass their resource type (e.g. 'job') and inherit the
    get/stream_get/delete/list/post/put plumbing.

    Fix versus the original: the ``params={}`` / ``data={}`` mutable default
    arguments are replaced with ``None`` sentinels normalized per call,
    avoiding the shared-mutable-default pitfall. Behavior is unchanged for
    all callers.
    """

    def __init__(self, ecx_session, restype=None, endpoint=None):
        self.ecx_session = ecx_session
        self.restype = restype
        self.endpoint = endpoint
        # JSON field holding the collection in list() responses; defaults
        # to the pluralized resource type.
        self.list_field = resource_to_listfield.get(restype, self.restype + 's')

    def get(self, resid=None, path=None, params=None, url=None):
        """GET one resource (optionally a sub-|path|) and return decoded JSON."""
        return self.ecx_session.get(restype=self.restype, resid=resid, path=path,
                                    params=params if params is not None else {},
                                    url=url)

    def stream_get(self, resid=None, path=None, params=None, url=None, outfile=None):
        """Download a file-producing endpoint; returns the saved file path."""
        return self.ecx_session.stream_get(restype=self.restype, resid=resid, path=path,
                                           params=params if params is not None else {},
                                           url=url, outfile=outfile)

    def delete(self, resid):
        """DELETE the resource with id |resid|."""
        return self.ecx_session.delete(restype=self.restype, resid=resid)

    def list(self):
        """Return the collection of all resources of this type."""
        return self.ecx_session.get(restype=self.restype)[self.list_field]

    def post(self, resid=None, path=None, data=None, params=None, url=None):
        """POST |data| as JSON and return the decoded response."""
        return self.ecx_session.post(restype=self.restype, resid=resid, path=path,
                                     data=data if data is not None else {},
                                     params=params if params is not None else {},
                                     url=url)

    def put(self, resid=None, path=None, data=None, params=None, url=None):
        """PUT |data| as JSON and return the decoded response."""
        return self.ecx_session.put(restype=self.restype, resid=resid, path=path,
                                    data=data if data is not None else {},
                                    params=params if params is not None else {},
                                    url=url)
class JobAPI(EcxAPI):
    """Job resource API: status queries, job start, and log retrieval."""

    def __init__(self, ecx_session):
        super(JobAPI, self).__init__(ecx_session, 'job')

    # TODO: May need to check this API seems to return null instead of current status
    # Can use lastSessionStatus property in the job object for now
    def status(self, jobid):
        """Return the job's status sub-resource."""
        return self.ecx_session.get(restype=self.restype, resid=jobid, path='status')

    # TODO: Accept a callback that can be called every time job status is polled.
    # The process of job start is different depending on whether jobs have storage
    # workflows.
    def run(self, jobid, workflowid=None):
        """Start the job via its HATEOAS 'start' link.

        When the job exposes a start schema (storage workflows), a workflow
        must be selected: the single available one, or |workflowid| when
        several exist.

        Raises:
            Exception: if the job has no 'start' link, has no workflows, or
                has several workflows and |workflowid| was not given.
        """
        job = self.ecx_session.get(restype=self.restype, resid=jobid)
        links = job['links']
        if 'start' not in links:
            raise Exception("'start' link not found for job: %d" % jobid)
        start_link = links['start']
        reqdata = {}
        if 'schema' in start_link:
            # The job has storage profiles.
            schema_data = self.ecx_session.get(url=start_link['schema'])
            workflows = schema_data['parameter']['actionname']['values']
            if not workflows:
                raise Exception("No workflows for job: %d" % jobid)
            if len(workflows) > 1:
                if(workflowid is None):
                    raise Exception("Workflow ID not provided")
                else:
                    reqdata["actionname"] = workflowid
            else:
                reqdata["actionname"] = workflows[0]['value']
        return self.ecx_session.post(url=start_link['href'], data=reqdata)

    def get_log_entries(self, jobsession_id, page_size=1000, page_start_index=0):
        """Return one page of log entries for |jobsession_id|, oldest first."""
        logging.info("*** get_log_entries: jobsession_id = %s, page_start_index: %s ***" % (jobsession_id, page_start_index))
        resp = self.ecx_session.get(restype='log', path='job',
                                    params={'pageSize': page_size, 'pageStartIndex': page_start_index,
                                            'sort': '[{"property":"logTime","direction":"ASC"}]',
                                            'filter': '[{"property":"jobsessionId","value":"%s"}]'%jobsession_id})
        logging.info("*** get_log_entries: Received %d entries..." % len(resp['logs']))
        return resp['logs']
class UserIdentityAPI(EcxAPI):
    """Identity-user API; create() registers a new identity record."""

    def __init__(self, ecx_session):
        super(UserIdentityAPI, self).__init__(ecx_session, 'identityuser')

    def create(self, data):
        """POST |data| to create a new identity user."""
        return self.post(data=data)
class AppserverAPI(EcxAPI):
    """Application-server resource API (endpoint 'appserver')."""

    def __init__(self, ecx_session):
        super(AppserverAPI, self).__init__(ecx_session, 'appserver')
class VsphereAPI(EcxAPI):
    """vSphere resource API (endpoint 'vsphere')."""

    def __init__(self, ecx_session):
        super(VsphereAPI, self).__init__(ecx_session, 'vsphere')
class ResProviderAPI(EcxAPI):
    """Registration API for resource providers (app servers, storage arrays)."""

    # Credential info is passed in different field names so we need to maintain
    # the mapping.
    user_field_name_map = {"appserver": "osuser", "purestorage": "user", "emcvnx": "user"}

    # Resource type doesn't always correspond to API so we need a map.
    res_api_map = {"purestorage": "pure"}

    def __init__(self, ecx_session, restype):
        super(ResProviderAPI, self).__init__(ecx_session, ResProviderAPI.res_api_map.get(restype, restype))

    def register(self, name, host, osuser_identity, appType=None, osType=None, catalog=True, ssl=True, vsphere_id=None):
        """Register a provider named |name| at |host| using |osuser_identity|.

        NOTE(review): the |ssl| parameter is accepted but never used in the
        request body — confirm against the server API.
        """
        osuser_field = ResProviderAPI.user_field_name_map.get(self.restype, 'user')
        reqdata = {
            "name": name, "hostAddress": host, "addToCatJob": catalog,
        }
        # Reference the identity by its self link rather than embedding it.
        reqdata[osuser_field] = {
            "href": osuser_identity['links']['self']['href']
        }
        if vsphere_id:
            reqdata["serverType"] = "virtual"
            reqdata["vsphereId"] = vsphere_id
        if appType:
            reqdata["applicationType"] = appType
            reqdata["useKeyAuthentication"] = False
        if osType:
            reqdata["osType"] = osType
        return self.post(data=reqdata)
class AssociationAPI(EcxAPI):
    """Association resource API: resources linked to a given resource."""

    def __init__(self, ecx_session):
        super(AssociationAPI, self).__init__(ecx_session, 'association')

    def get_using_resources(self, restype, resid):
        """List resources that are using the resource restype/resid."""
        return self.get(path="resource/%s/%s" % (restype, resid), params={"action": "listUsingResources"})
class LogAPI(EcxAPI):
    """Log resource API; supports downloading the diagnostics bundle."""

    def __init__(self, ecx_session):
        super(LogAPI, self).__init__(ecx_session, 'log')

    def download_logs(self, outfile=None):
        """Download the diagnostics archive; returns the saved file path."""
        return self.stream_get(path="download/diagnostics", outfile=outfile)
class OracleAPI(EcxAPI):
    """Oracle application API: homes, databases, and copy versions."""

    def __init__(self, ecx_session):
        super(OracleAPI, self).__init__(ecx_session, 'oracle')

    def get_instances(self):
        """List Oracle homes (instances)."""
        return self.get(path="oraclehome")

    def get_databases_in_instance(self, instanceid):
        """List databases under the given Oracle home."""
        return self.get(path="oraclehome/%s/database" % instanceid)

    def get_database_copy_versions(self, instanceid, databaseid):
        """List copy versions of one database under one Oracle home."""
        return self.get(path="oraclehome/%s/database/%s" % (instanceid, databaseid) + "/version")
| {
"content_hash": "ebedb5680f9801215fdefec17bcd86cf",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 125,
"avg_line_length": 36.98529411764706,
"alnum_prop": 0.6172564612326044,
"repo_name": "catalogicsoftware/ecxclient",
"id": "0df780762c505ce17477f4f42f56570e613ea236",
"size": "12576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecxclient/sdk/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26332"
}
],
"symlink_target": ""
} |
import unittest
import time
import eventlet
import mock
from contextlib import contextmanager
from threading import Thread
from test.unit import FakeLogger
from swift.common.middleware import ratelimit
from swift.proxy.controllers.base import get_container_memcache_key, \
headers_to_container_info
from swift.common.memcached import MemcacheConnectionError
from swift.common.swob import Request
from swift.common import utils
class FakeMemcache(object):
    """In-memory stand-in for swift's memcache client.

    Two knobs let tests inject failure modes: ``error_on_incr`` makes
    incr() raise MemcacheConnectionError, and ``init_incr_return_neg``
    makes the next incr() call return a bogus large negative value once.
    """
    def __init__(self):
        self.store = {}
        self.error_on_incr = False
        self.init_incr_return_neg = False
    def get(self, key):
        """Return the stored value, or None when absent."""
        return self.store.get(key)
    def set(self, key, value, serialize=False, time=0):
        """Store *value* unconditionally; always reports success."""
        self.store[key] = value
        return True
    def incr(self, key, delta=1, time=0):
        """Add *delta* to the counter at *key*, clamping at zero."""
        if self.error_on_incr:
            raise MemcacheConnectionError('Memcache restarting')
        if self.init_incr_return_neg:
            # one-shot: simulate an initial bogus negative reply, which
            # forces the middleware to reset the key in memcache
            self.init_incr_return_neg = False
            return -10000000
        bumped = int(self.store.setdefault(key, 0)) + int(delta)
        self.store[key] = max(bumped, 0)
        return int(self.store[key])
    def decr(self, key, delta=1, time=0):
        """Decrement is just incr() with a negated delta."""
        return self.incr(key, delta=-delta, time=time)
    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        """Pretend the soft lock is always acquired."""
        yield True
    def delete(self, key):
        """Remove *key* if present; always reports success."""
        try:
            del self.store[key]
        except Exception:
            pass
        return True
def mock_http_connect(response, headers=None, with_exc=False):
    """Build a factory mimicking http_connect that yields a canned response.

    The returned callable ignores its arguments and produces a FakeConn
    with the given status, headers, and (optionally) an exception that is
    raised from getresponse().
    """
    class FakeConn(object):
        def __init__(self, status, headers, with_exc):
            self.status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.with_exc = with_exc
            # default to an empty header map when none was supplied
            self.headers = {} if headers is None else headers
        def getresponse(self):
            if self.with_exc:
                raise Exception('test')
            return self
        def getheader(self, header):
            return self.headers[header]
        def read(self, amt=None):
            return ''
        def close(self):
            return
    def _connect(*args, **kwargs):
        return FakeConn(response, headers, with_exc)
    return _connect
class FakeApp(object):
    """Minimal WSGI app stub: always answers 204 No Content."""
    def __call__(self, env, start_response):
        body = ['204 No Content']
        return body
def start_response(*args):
    """No-op WSGI start_response placeholder used by the tests."""
    return None
# Fake clock shared by the tests: `time_ticker` is the current fake time,
# and `time_override` is a queue of explicit values mock_time() will
# serve before falling back to the ticker.
time_ticker = 0
time_override = []
def mock_sleep(x):
    """Advance the fake clock by *x* instead of really sleeping."""
    global time_ticker
    time_ticker = time_ticker + x
def mock_time():
    """Fake time.time(): serve queued override values, else the ticker.

    A None entry in time_override means "switch back to the ticker"; the
    remaining queued values are rebased onto the current ticker when that
    happens.
    """
    global time_override
    global time_ticker
    if not time_override:
        return time_ticker
    cur_time = time_override.pop(0)
    if cur_time is not None:
        return cur_time
    time_override = [None if v is None else v + time_ticker
                     for v in time_override]
    return time_ticker
class TestRateLimit(unittest.TestCase):
    """Tests for swift.common.middleware.ratelimit.

    setUp monkey-patches eventlet.sleep and time.time with the fake clock
    helpers above, so "sleeping" advances the module-level time_ticker
    instead of wall time and the tests run instantly.
    """
    def _reset_time(self):
        # rewind the fake clock between scenarios
        global time_ticker
        time_ticker = 0
    def setUp(self):
        # swap in the fake sleep/time; restored in tearDown
        self.was_sleep = eventlet.sleep
        eventlet.sleep = mock_sleep
        self.was_time = time.time
        time.time = mock_time
        self._reset_time()
    def tearDown(self):
        # undo the monkey-patching from setUp
        eventlet.sleep = self.was_sleep
        time.time = self.was_time
    def _run(self, callable_func, num, rate, check_time=True):
        """Call callable_func `num` times and, when check_time is set,
        assert the fake elapsed time matches `rate` requests/sec."""
        global time_ticker
        begin = time.time()
        for x in range(num):
            callable_func()
        end = time.time()
        total_time = float(num) / rate - 1.0 / rate  # 1st request not limited
        # Allow for one second of variation in the total time.
        time_diff = abs(total_time - (end - begin))
        if check_time:
            self.assertEquals(round(total_time, 1), round(time_ticker, 1))
        return time_diff
    def test_get_maxrate(self):
        """get_maxrate picks the limit for the closest configured size."""
        conf_dict = {'container_ratelimit_10': 200,
                     'container_ratelimit_50': 100,
                     'container_ratelimit_75': 30}
        test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        test_ratelimit.logger = FakeLogger()
        self.assertEquals(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 0), None)
        self.assertEquals(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 5), None)
        self.assertEquals(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 10), 200)
        self.assertEquals(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 60), 72)
        self.assertEquals(ratelimit.get_maxrate(
            test_ratelimit.container_ratelimits, 160), 30)
    def test_get_ratelimitable_key_tuples(self):
        """Which (key, rate) tuples apply per method/path combination."""
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_container_memcache_key('a', 'c')] = \
            {'object_count': '5'}
        the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
        the_app.memcache_client = fake_memcache
        req = lambda: None
        req.environ = {'swift.cache': fake_memcache, 'PATH_INFO': '/v1/a/c/o'}
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            req.method = 'DELETE'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', None, None)), 0)
            req.method = 'PUT'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', None)), 1)
            req.method = 'DELETE'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', None)), 1)
            req.method = 'GET'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', 'o')), 0)
            req.method = 'PUT'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', 'o')), 1)
        # a sysmeta global-write-ratelimit adds a second tuple
        def get_fake_ratelimit(*args, **kwargs):
            return {'sysmeta': {'global-write-ratelimit': 10}}
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        get_fake_ratelimit):
            req.method = 'PUT'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', None)), 2)
            self.assertEquals(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', None)[1], ('ratelimit/global-write/a', 10))
        # an unparsable global-write-ratelimit is ignored
        def get_fake_ratelimit(*args, **kwargs):
            return {'sysmeta': {'global-write-ratelimit': 'notafloat'}}
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        get_fake_ratelimit):
            req.method = 'PUT'
            self.assertEquals(len(the_app.get_ratelimitable_key_tuples(
                req, 'a', 'c', None)), 1)
    def test_memcached_container_info_dict(self):
        """headers_to_container_info exposes the object count."""
        mdict = headers_to_container_info({'x-container-object-count': '45'})
        self.assertEquals(mdict['object_count'], '45')
    def test_ratelimit_old_memcache_format(self):
        """Old-style 'container_size' memcache entries still work."""
        current_rate = 13
        conf_dict = {'account_ratelimit': current_rate,
                     'container_ratelimit_3': 200}
        fake_memcache = FakeMemcache()
        fake_memcache.store[get_container_memcache_key('a', 'c')] = \
            {'container_size': 5}
        the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
        the_app.memcache_client = fake_memcache
        req = lambda: None
        req.method = 'PUT'
        req.environ = {'PATH_INFO': '/v1/a/c/o', 'swift.cache': fake_memcache}
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            tuples = the_app.get_ratelimitable_key_tuples(req, 'a', 'c', 'o')
            self.assertEquals(tuples, [('ratelimit/a/c', 200.0)])
    def test_account_ratelimit(self):
        """Writes (PUT/DELETE) are limited; reads (GET/POST) are not."""
        current_rate = 5
        num_calls = 50
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        with mock.patch('swift.common.middleware.ratelimit.get_container_info',
                        lambda *args, **kwargs: {}):
            with mock.patch(
                    'swift.common.middleware.ratelimit.get_account_info',
                    lambda *args, **kwargs: {}):
                for meth, exp_time in [('DELETE', 9.8), ('GET', 0),
                                       ('POST', 0), ('PUT', 9.8)]:
                    req = Request.blank('/v/a%s/c' % meth)
                    req.method = meth
                    req.environ['swift.cache'] = FakeMemcache()
                    make_app_call = lambda: self.test_ratelimit(req.environ,
                                                                start_response)
                    begin = time.time()
                    self._run(make_app_call, num_calls, current_rate,
                              check_time=bool(exp_time))
                    self.assertEquals(round(time.time() - begin, 1), exp_time)
                    self._reset_time()
    def test_ratelimit_set_incr(self):
        """A bogus negative incr() reply forces a memcache reset but the
        overall rate limiting still applies."""
        current_rate = 5
        num_calls = 50
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].init_incr_return_neg = True
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            self._run(make_app_call, num_calls, current_rate, check_time=False)
            self.assertEquals(round(time.time() - begin, 1), 9.8)
    def test_ratelimit_whitelist(self):
        """Whitelisted accounts are never slowed down."""
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.environ['swift.cache'] = FakeMemcache()
        class rate_caller(Thread):
            def __init__(self, parent):
                Thread.__init__(self)
                self.parent = parent
            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)
        nt = 5
        threads = []
        for i in range(nt):
            rc = rate_caller(self)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        # no thread should have received a 498 "Slow down" body
        the_498s = [
            t for t in threads if ''.join(t.result).startswith('Slow down')]
        self.assertEquals(len(the_498s), 0)
        self.assertEquals(time_ticker, 0)
    def test_ratelimit_blacklist(self):
        """Blacklisted accounts get 497 responses for every request."""
        global time_ticker
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2,
                     'account_whitelist': 'a',
                     'account_blacklist': 'b'}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        self.test_ratelimit.logger = FakeLogger()
        self.test_ratelimit.BLACK_LIST_SLEEP = 0
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/b/c')
        req.environ['swift.cache'] = FakeMemcache()
        class rate_caller(Thread):
            def __init__(self, parent):
                Thread.__init__(self)
                self.parent = parent
            def run(self):
                self.result = self.parent.test_ratelimit(req.environ,
                                                         start_response)
        nt = 5
        threads = []
        for i in range(nt):
            rc = rate_caller(self)
            rc.start()
            threads.append(rc)
        for thread in threads:
            thread.join()
        # every thread should have received the 497 body
        the_497s = [
            t for t in threads if ''.join(t.result).startswith('Your account')]
        self.assertEquals(len(the_497s), 5)
        self.assertEquals(time_ticker, 0)
    def test_ratelimit_max_rate_double(self):
        """Requests beyond max_sleep_time_seconds get 'Slow down'."""
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        time_override = [0, 0, 0, 0, None]
        # simulates 4 requests coming in at same time, then sleeping
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            r = self.test_ratelimit(req.environ, start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            self.assertEquals(r[0], 'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            self.assertEquals(r[0], 'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            self.assertEquals(r[0], '204 No Content')
    def test_ratelimit_max_rate_double_container(self):
        """Same as above but driven by a per-container write limit."""
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'container_ratelimit_0': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v/a/c/o')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].set(
            get_container_memcache_key('a', 'c'),
            {'container_size': 1})
        time_override = [0, 0, 0, 0, None]
        # simulates 4 requests coming in at same time, then sleeping
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            r = self.test_ratelimit(req.environ, start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            self.assertEquals(r[0], 'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            self.assertEquals(r[0], 'Slow down')
            mock_sleep(.1)
            r = self.test_ratelimit(req.environ, start_response)
            self.assertEquals(r[0], '204 No Content')
    def test_ratelimit_max_rate_double_container_listing(self):
        """Same as above but for container-listing GET limits; also checks
        handle_ratelimit is a no-op without a memcache client."""
        global time_ticker
        global time_override
        current_rate = 2
        conf_dict = {'container_listing_ratelimit_0': current_rate,
                     'clock_accuracy': 100,
                     'max_sleep_time_seconds': 1}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        self.test_ratelimit.log_sleep_time_seconds = .00001
        req = Request.blank('/v/a/c')
        req.method = 'GET'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].set(
            get_container_memcache_key('a', 'c'),
            {'container_size': 1})
        time_override = [0, 0, 0, 0, None]
        # simulates 4 requests coming in at same time, then sleeping
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEquals(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEquals(r[0], 'Slow down')
        mock_sleep(.1)
        r = self.test_ratelimit(req.environ, start_response)
        self.assertEquals(r[0], '204 No Content')
        mc = self.test_ratelimit.memcache_client
        try:
            self.test_ratelimit.memcache_client = None
            self.assertEquals(
                self.test_ratelimit.handle_ratelimit(req, 'n', 'c', None),
                None)
        finally:
            self.test_ratelimit.memcache_client = mc
    def test_ratelimit_max_rate_multiple_acc(self):
        """Independent accounts are limited independently but share the
        fake clock, so total elapsed time reflects the per-account rate."""
        num_calls = 4
        current_rate = 2
        conf_dict = {'account_ratelimit': current_rate,
                     'max_sleep_time_seconds': 2}
        fake_memcache = FakeMemcache()
        the_app = ratelimit.filter_factory(conf_dict)(FakeApp())
        the_app.memcache_client = fake_memcache
        req = lambda: None
        req.method = 'PUT'
        req.environ = {}
        class rate_caller(Thread):
            def __init__(self, name):
                self.myname = name
                Thread.__init__(self)
            def run(self):
                for j in range(num_calls):
                    self.result = the_app.handle_ratelimit(req, self.myname,
                                                           'c', None)
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            nt = 15
            begin = time.time()
            threads = []
            for i in range(nt):
                rc = rate_caller('a%s' % i)
                rc.start()
                threads.append(rc)
            for thread in threads:
                thread.join()
            time_took = time.time() - begin
            self.assertEquals(1.5, round(time_took, 1))
    def test_call_invalid_path(self):
        """A non-swift path passes straight through to the wrapped app."""
        env = {'REQUEST_METHOD': 'GET',
               'SCRIPT_NAME': '',
               'PATH_INFO': '//v1/AUTH_1234567890',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '80',
               'swift.cache': FakeMemcache(),
               'SERVER_PROTOCOL': 'HTTP/1.0'}
        app = lambda *args, **kwargs: ['fake_app']
        rate_mid = ratelimit.filter_factory({})(app)
        class a_callable(object):
            def __call__(self, *args, **kwargs):
                pass
        resp = rate_mid.__call__(env, a_callable())
        self.assert_('fake_app' == resp[0])
    def test_no_memcache(self):
        """Without a memcache client there is no rate limiting at all."""
        current_rate = 13
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a')
        req.environ['swift.cache'] = None
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        self._run(make_app_call, num_calls, current_rate, check_time=False)
        time_took = time.time() - begin
        self.assertEquals(round(time_took, 1), 0)  # no memcache, no limiting
    def test_restarting_memcache(self):
        """A memcache connection error disables limiting gracefully."""
        current_rate = 2
        num_calls = 5
        conf_dict = {'account_ratelimit': current_rate}
        self.test_ratelimit = ratelimit.filter_factory(conf_dict)(FakeApp())
        ratelimit.http_connect = mock_http_connect(204)
        req = Request.blank('/v/a/c')
        req.method = 'PUT'
        req.environ['swift.cache'] = FakeMemcache()
        req.environ['swift.cache'].error_on_incr = True
        make_app_call = lambda: self.test_ratelimit(req.environ,
                                                    start_response)
        begin = time.time()
        with mock.patch('swift.common.middleware.ratelimit.get_account_info',
                        lambda *args, **kwargs: {}):
            self._run(make_app_call, num_calls, current_rate, check_time=False)
            time_took = time.time() - begin
            self.assertEquals(round(time_took, 1), 0)  # no memcache, no limit
class TestSwiftInfo(unittest.TestCase):
    """Check that the ratelimit filter registers its settings in /info."""
    def setUp(self):
        # start each test from an empty swift-info registry
        utils._swift_info = {}
        utils._swift_admin_info = {}
    def test_registered_defaults(self):
        """filter_factory() must publish the configured rate limits via
        swift_info, while operational tuning knobs stay unregistered."""
        def check_key_is_absent(key):
            # Helper: swift_info[key] must raise KeyError for `key` itself.
            # Renamed from the "absnet" typo; membership is now tested
            # against err.args, which works on both Python 2 and 3 --
            # `key not in err` relied on Py2's indexable exceptions.
            try:
                swift_info[key]
            except KeyError as err:
                if key not in err.args:
                    raise
        test_limits = {'account_ratelimit': 1,
                       'max_sleep_time_seconds': 60,
                       'container_ratelimit_0': 0,
                       'container_ratelimit_10': 10,
                       'container_ratelimit_50': 50,
                       'container_listing_ratelimit_0': 0,
                       'container_listing_ratelimit_10': 10,
                       'container_listing_ratelimit_50': 50}
        ratelimit.filter_factory(test_limits)('have to pass in an app')
        swift_info = utils.get_swift_info()
        self.assertTrue('ratelimit' in swift_info)
        self.assertEqual(swift_info['ratelimit']
                         ['account_ratelimit'], 1.0)
        self.assertEqual(swift_info['ratelimit']
                         ['max_sleep_time_seconds'], 60.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][0][0], 0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][0][1], 0.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][1][0], 10)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][1][1], 10.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][2][0], 50)
        self.assertEqual(swift_info['ratelimit']
                         ['container_ratelimits'][2][1], 50.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][0][0], 0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][0][1], 0.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][1][0], 10)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][1][1], 10.0)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][2][0], 50)
        self.assertEqual(swift_info['ratelimit']
                         ['container_listing_ratelimits'][2][1], 50.0)
        # these were left out on purpose (fixed 'ratelimit_whitelis' typo)
        for key in ['log_sleep_time_seconds', 'clock_accuracy',
                    'rate_buffer_seconds', 'ratelimit_whitelist',
                    'ratelimit_blacklist']:
            check_key_is_absent(key)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "6939e707c8e47909ec14c0d978612325",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 79,
"avg_line_length": 39.350819672131145,
"alnum_prop": 0.5489085152474588,
"repo_name": "heemanshu/swift_juno",
"id": "12940eac85fbbe8912d99d1ecfa56f50a0fa4c7a",
"size": "24599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/common/middleware/test_ratelimit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5053977"
},
{
"name": "Shell",
"bytes": "950"
}
],
"symlink_target": ""
} |
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import numpy as np
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, diags, rand
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import (eigs, eigsh, arpack,
ArpackNoConvergence)
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
# precision for tests: decimal digits used when rounding eigenvalues
# before comparison, keyed by LAPACK-style dtype character
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
                    pos_definite=False, sparse=False):
    """Build a random N x N test matrix (dense ndarray).

    `sparse` only zeroes out entries to make the matrix sparse-ish; the
    returned object is still a dense array.  NOTE: the exact values depend
    on the order of np.random calls below -- do not reorder them, or the
    seeded fixtures elsewhere in this file will change.
    """
    M = np.random.random((N, N))
    if complex_:
        M = M + 1j * np.random.random((N, N))
    if hermitian:
        if pos_definite:
            if sparse:
                # zero a few full rows/columns before forming M M^H so the
                # product keeps some structure
                i = np.arange(N)
                j = np.random.randint(N, size=N-2)
                i, j = np.meshgrid(i, j)
                M[i, j] = 0
            # M M^H is hermitian positive (semi-)definite
            M = np.dot(M.conj(), M.T)
        else:
            M = np.dot(M.conj(), M.T)
            if sparse:
                # zero random symmetric pairs, avoiding the diagonal
                i = np.random.randint(N, size=N * N // 4)
                j = np.random.randint(N, size=N * N // 4)
                ind = np.nonzero(i == j)
                j[ind] = (j[ind] + 1) % N
                M[i, j] = 0
                M[j, i] = 0
    else:
        if sparse:
            i = np.random.randint(N, size=N * N // 2)
            j = np.random.randint(N, size=N * N // 2)
            M[i, j] = 0
    return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
    """Build a random N x N symmetric test matrix.

    With ``sparse`` the matrix is converted to CSR; with ``pos_definite``
    N * I is added, which guarantees positive definiteness.
    """
    raw = np.random.random((N, N))
    sym = (raw + raw.T) * 0.5  # symmetrize
    if sparse:
        sym = csr_matrix(sym)
    if pos_definite:
        sym += N * np.eye(N)
    return sym
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
    """Almost equal or complex conjugates almost equal"""
    try:
        assert_allclose(actual, desired, **kw)
        return
    except AssertionError:
        pass
    # fall back to comparing against the complex conjugate
    assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
                  sigma=None, OPpart=None, mode=None):
    """Return sorted indices of eigenvalues using the "which" keyword
    from eigs and eigsh"""
    # First map the eigenvalues through the spectral transform ARPACK
    # actually sees, so "largest"/"smallest" refers to the transformed
    # problem; then round to the per-dtype comparison precision.
    if sigma is None:
        reval = eigenvalues
    elif mode is None or mode == 'normal':
        if OPpart is None:
            reval = 1. / (eigenvalues - sigma)
        elif OPpart == 'r':
            reval = 0.5 * (1. / (eigenvalues - sigma)
                           + 1. / (eigenvalues - np.conj(sigma)))
        elif OPpart == 'i':
            reval = -0.5j * (1. / (eigenvalues - sigma)
                             - 1. / (eigenvalues - np.conj(sigma)))
    elif mode == 'cayley':
        reval = (eigenvalues + sigma) / (eigenvalues - sigma)
    elif mode == 'buckling':
        reval = eigenvalues / (eigenvalues - sigma)
    else:
        raise ValueError("mode='%s' not recognized" % mode)
    reval = np.round(reval, decimals=_ndigits[typ])
    # choose the sort key implied by `which`
    if which in ['LM', 'SM']:
        ind = np.argsort(abs(reval))
    elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
        ind = np.argsort(np.real(reval))
    elif which in ['LI', 'SI']:
        # for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
        if typ.islower():
            ind = np.argsort(abs(np.imag(reval)))
        else:
            ind = np.argsort(np.imag(reval))
    else:
        raise ValueError("which='%s' is unrecognized" % which)
    # take the requested end (or both ends for 'BE') of the ordering
    if which in ['LM', 'LA', 'LR', 'LI']:
        return ind[-k:]
    if which in ['SM', 'SA', 'SR', 'SI']:
        return ind[:k]
    # which == 'BE': half from each end of the spectrum
    return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
              mattype=np.asarray, OPpart=None, mode='normal'):
    """Run eigs/eigsh on test case `d` and compare against exact values.

    `d` is a DictWithRepr holding 'mat', optional 'bmat' (generalized
    problem), and precomputed exact eigenvalues under 'eval'.  `typ` is a
    LAPACK dtype char; `mattype` converts the dense fixture into the
    operator type under test.
    """
    # a 'bmat' entry marks a generalized eigenvalue problem
    general = ('bmat' in d)
    if symmetric:
        eigs_func = eigsh
    else:
        eigs_func = eigs
    if general:
        err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
               "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
                                                   typ, which, sigma,
                                                   mattype.__name__,
                                                   OPpart, mode))
    else:
        err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
               "mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
                                                   typ, which, sigma,
                                                   mattype.__name__,
                                                   OPpart, mode))
    a = d['mat'].astype(typ)
    ac = mattype(a)
    if general:
        b = d['bmat'].astype(typ)
        bc = mattype(b)
    # get exact eigenvalues
    exact_eval = d['eval'].astype(typ.upper())
    ind = argsort_which(exact_eval, typ, k, which,
                        sigma, OPpart, mode)
    exact_eval = exact_eval[ind]
    # compute arpack eigenvalues
    kwargs = dict(which=which, v0=v0, sigma=sigma)
    if eigs_func is eigsh:
        kwargs['mode'] = mode
    else:
        kwargs['OPpart'] = OPpart
    # compute suitable tolerances
    kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
    # on rare occasions, ARPACK routines return results that are proper
    # eigenvalues and -vectors, but not necessarily the ones requested in
    # the parameter which. This is inherent to the Krylov methods, and
    # should not be treated as a failure. If such a rare situation
    # occurs, the calculation is tried again (but at most a few times).
    ntries = 0
    while ntries < 5:
        # solve; on non-convergence retry once with a larger maxiter
        if general:
            try:
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
        else:
            try:
                eigenvalues, evec = eigs_func(ac, k, **kwargs)
            except ArpackNoConvergence:
                kwargs['maxiter'] = 20*a.shape[0]
                eigenvalues, evec = eigs_func(ac, k, **kwargs)
        ind = argsort_which(eigenvalues, typ, k, which,
                            sigma, OPpart, mode)
        eigenvalues = eigenvalues[ind]
        evec = evec[:, ind]
        # check eigenvectors: A v = lambda (B) v
        LHS = np.dot(a, evec)
        if general:
            RHS = eigenvalues * np.dot(b, evec)
        else:
            RHS = eigenvalues * evec
        assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
        try:
            # check eigenvalues
            assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
                               err_msg=err)
            break
        except AssertionError:
            ntries += 1
    # check eigenvalues (re-raises the failure if all retries were used)
    assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
    """A dict whose repr shows a short human-readable test-case name."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return "<{}>".format(self.name)
class SymmetricParams:
    """Fixture bundle for the symmetric/hermitian (eigsh) tests.

    Builds seeded standard and generalized problems with exact
    eigenvalues from scipy.linalg.eigh for later comparison.  The RNG
    call order below must not change, or the fixtures change.
    """
    def __init__(self):
        self.eigs = eigsh
        self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_modes = {None: ['normal'],
                             0.5: ['normal', 'buckling', 'cayley']}
        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N, hermitian=True,
                             pos_definite=True).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        Mc = generate_matrix(N, hermitian=True, pos_definite=True,
                             complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)
        # standard symmetric problem
        SS = DictWithRepr("std-symmetric")
        SS['mat'] = Ar
        SS['v0'] = v0
        SS['eval'] = eigh(SS['mat'], eigvals_only=True)
        # general symmetric problem
        GS = DictWithRepr("gen-symmetric")
        GS['mat'] = Ar
        GS['bmat'] = M
        GS['v0'] = v0
        GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
        # standard hermitian problem
        SH = DictWithRepr("std-hermitian")
        SH['mat'] = Ac
        SH['v0'] = v0
        SH['eval'] = eigh(SH['mat'], eigvals_only=True)
        # general hermitian problem
        GH = DictWithRepr("gen-hermitian")
        GH['mat'] = Ac
        GH['bmat'] = M
        GH['v0'] = v0
        GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
        # general hermitian problem with hermitian M
        GHc = DictWithRepr("gen-hermitian-Mc")
        GHc['mat'] = Ac
        GHc['bmat'] = Mc
        GHc['v0'] = v0
        GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
        self.real_test_cases = [SS, GS]
        self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
    """Fixture bundle for the nonsymmetric (eigs) tests.

    Same structure as SymmetricParams but with exact eigenvalues from
    scipy.linalg.eig.  The RNG call order below must not change.
    """
    def __init__(self):
        self.eigs = eigs
        self.which = ['LM', 'LR', 'LI']  # , 'SM', 'LR', 'SR', 'LI', 'SI']
        self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
        self.sigmas_OPparts = {None: [None],
                               0.1: ['r'],
                               0.1 + 0.1j: ['r', 'i']}
        # generate matrices
        # these should all be float32 so that the eigenvalues
        # are the same in float32 and float64
        N = 6
        np.random.seed(2300)
        Ar = generate_matrix(N).astype('f').astype('d')
        M = generate_matrix(N, hermitian=True,
                            pos_definite=True).astype('f').astype('d')
        Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
        v0 = np.random.random(N)
        # standard real nonsymmetric problem
        SNR = DictWithRepr("std-real-nonsym")
        SNR['mat'] = Ar
        SNR['v0'] = v0
        SNR['eval'] = eig(SNR['mat'], left=False, right=False)
        # general real nonsymmetric problem
        GNR = DictWithRepr("gen-real-nonsym")
        GNR['mat'] = Ar
        GNR['bmat'] = M
        GNR['v0'] = v0
        GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
        # standard complex nonsymmetric problem
        SNC = DictWithRepr("std-cmplx-nonsym")
        SNC['mat'] = Ac
        SNC['v0'] = v0
        SNC['eval'] = eig(SNC['mat'], left=False, right=False)
        # general complex nonsymmetric problem
        GNC = DictWithRepr("gen-cmplx-nonsym")
        GNC['mat'] = Ac
        GNC['bmat'] = M
        GNC['v0'] = v0
        GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
        self.real_test_cases = [SNR, GNR]
        self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
    """Exhaustively run eval_evec over all real-symmetric combinations."""
    params = SymmetricParams()
    for D in params.real_test_cases:
        for typ, which, mattype in itertools.product('fd', params.which,
                                                     params.mattypes):
            for sigma, modes in params.sigmas_modes.items():
                for mode in modes:
                    eval_evec(True, D, typ, 2, which,
                              None, sigma, mattype, None, mode)
def test_hermitian_modes():
    """Run eval_evec over all complex-hermitian combinations.

    'BE' is skipped because it is invalid for complex problems.
    """
    params = SymmetricParams()
    for D in params.complex_test_cases:
        for typ, which in itertools.product('FD', params.which):
            if which == 'BE':
                continue  # BE invalid for complex
            for mattype in params.mattypes:
                for sigma in params.sigmas_modes:
                    eval_evec(True, D, typ, 2, which,
                              None, sigma, mattype)
def test_symmetric_starting_vector():
    """eigsh with an explicit random starting vector, for several k."""
    params = SymmetricParams()
    for k, D in itertools.product([1, 2, 3, 4, 5], params.real_test_cases):
        for typ in 'fd':
            v0 = random.rand(len(D['v0'])).astype(typ)
            eval_evec(True, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
    """eigsh must report partial results through ArpackNoConvergence."""
    np.random.seed(1234)
    m = generate_matrix(30, hermitian=True, pos_definite=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        # too few iterations to converge on purpose
        eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        # the partially-converged pairs must still satisfy A v = w v
        assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
    else:
        raise AssertionError("Spurious no-error exit")
def test_real_nonsymmetric_modes():
    """Exhaustively run eval_evec over all real nonsymmetric combinations."""
    params = NonSymmetricParams()
    for D in params.real_test_cases:
        for typ, which, mattype in itertools.product('fd', params.which,
                                                     params.mattypes):
            for sigma, OPparts in params.sigmas_OPparts.items():
                for OPpart in OPparts:
                    eval_evec(False, D, typ, 2, which,
                              None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
    """Exhaustively run eval_evec over all complex nonsymmetric cases."""
    params = NonSymmetricParams()
    for D in params.complex_test_cases:
        for typ, which, mattype in itertools.product('DF', params.which,
                                                     params.mattypes):
            for sigma in params.sigmas_OPparts:
                eval_evec(False, D, typ, 2, which,
                          None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
    """eigs with an explicit random starting vector (standard problem)."""
    params = NonSymmetricParams()
    for k, d in itertools.product([1, 2, 3, 4], params.complex_test_cases):
        for typ in 'FD':
            n = d['mat'].shape[0]
            v0 = random.rand(n).astype(typ)
            eval_evec(False, d, typ, k, "LM", v0, None)
def test_general_nonsymmetric_starting_vector():
    """eigs with an explicit random starting vector (general problem).

    NOTE(review): this iterates complex_test_cases just like the
    "standard" variant above -- presumably intentional upstream; confirm
    before changing.
    """
    params = NonSymmetricParams()
    for k, d in itertools.product([1, 2, 3, 4], params.complex_test_cases):
        for typ in 'FD':
            n = d['mat'].shape[0]
            v0 = random.rand(n).astype(typ)
            eval_evec(False, d, typ, k, "LM", v0, None)
def test_standard_nonsymmetric_no_convergence():
    """eigs must report partial results through ArpackNoConvergence."""
    np.random.seed(1234)
    m = generate_matrix(30, complex_=True)
    tol, rtol, atol = _get_test_tolerance('d')
    try:
        # too few iterations to converge on purpose
        eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
    except ArpackNoConvergence as err:
        if len(err.eigenvalues) <= 0:
            raise AssertionError("Spurious no-eigenvalues-found case") from err
        w, v = err.eigenvalues, err.eigenvectors
        # each partially-converged pair must still satisfy A v = w v
        for ww, vv in zip(w, v.T):
            assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
    else:
        raise AssertionError("Spurious no-error exit")
def test_eigen_bad_shapes():
    """eigs must reject a non-square operator."""
    rectangular = csc_matrix(np.zeros((2, 3)))
    assert_raises(ValueError, eigs, rectangular)
def test_eigen_bad_kwargs():
    """eigs must reject an unknown `which` value."""
    square = csc_matrix(np.zeros((8, 8)))
    assert_raises(ValueError, eigs, square, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# This test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
    # Check that the linear operators used by the Arpack wrappers are
    # deallocatable by reference counting -- they are big objects, so
    # Python's cyclic GC may not collect them fast enough before
    # running out of memory if eigs/eigsh are called in a tight loop.
    M_d = np.eye(10)
    M_s = csc_matrix(M_d)
    M_o = aslinearoperator(M_d)
    constructors = [
        lambda: arpack.SpLuInv(M_s),
        lambda: arpack.LuInv(M_d),
        lambda: arpack.IterInv(M_s),
        lambda: arpack.IterOpInv(M_o, None, 0.3),
        lambda: arpack.IterOpInv(M_o, M_o, 0.3),
    ]
    for make_operator in constructors:
        with assert_deallocated(make_operator):
            pass
def test_parallel_threads():
    """eigs()/eigsh() must be safely callable from several threads at once."""
    collected = []
    v0 = np.random.rand(50)

    def solve_both():
        op = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        vals, _ = eigs(op, k=3, v0=v0)
        collected.append(vals)
        vals, _ = eigsh(op, k=3, v0=v0)
        collected.append(vals)

    workers = [threading.Thread(target=solve_both) for _ in range(10)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()

    # One more run on the main thread provides the reference result.
    solve_both()
    for result in collected:
        assert_allclose(result, collected[-1])
def test_reentering():
    # Just some linear operator that calls eigs recursively
    def recursive_matvec(x):
        op = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
        vals, vecs = eigs(op, k=1)
        return vecs / vals[0]

    A = LinearOperator(matvec=recursive_matvec, dtype=float, shape=(50, 50))
    # The Fortran code is not reentrant, so this fails (gracefully, not crashing)
    assert_raises(RuntimeError, eigs, A, k=1)
    assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
    # Check that issue arpack-ng/#1315 is not present.
    # Adapted from arpack-ng/TESTS/bug_1315_single.c
    # If this fails, then the installed ARPACK library is faulty.
    for dtype in [np.float32, np.float64]:
        np.random.seed(1234)
        true_vals = np.arange(1, 1000 + 1).astype(dtype)
        op = diags([true_vals], [0], shape=(1000, 1000))
        start = np.random.rand(1000).astype(dtype)

        vals, vecs = eigs(op, k=9, ncv=2 * 9 + 1, which="LM", v0=start)
        # The nine largest-magnitude eigenvalues of the diagonal matrix
        # are simply its nine largest diagonal entries.
        assert_allclose(np.sort(vals), np.sort(true_vals[-9:]), rtol=1e-4)
def test_eigs_for_k_greater():
    # Test eigs() for k beyond limits.
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = np.random.random((4, 4))
    M_sparse = generate_matrix(4, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_ref = eig(A, b=M_dense)
    sparse_ref = eig(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        # Oversized k must fall back to the dense eig() result.
        for k in (3, 4, 5):
            assert_equal(eigs(A, M=M_dense, k=k), dense_ref)
        assert_equal(eigs(A, M=M_sparse, k=5), sparse_ref)

        # M as LinearOperator
        assert_raises(TypeError, eigs, A, M=M_linop, k=3)

        # Test 'A' for different types
        assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
        assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
    # Test eigsh() for k beyond limits.
    A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4))  # sparse
    A = generate_matrix(4, sparse=False)
    M_dense = generate_matrix_symmetric(4, pos_definite=True)
    M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
    M_linop = aslinearoperator(M_dense)
    dense_ref = eigh(A, b=M_dense)
    sparse_ref = eigh(A, b=M_sparse)

    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        # Oversized k must fall back to the dense eigh() result.
        for k in (4, 5):
            assert_equal(eigsh(A, M=M_dense, k=k), dense_ref)
        assert_equal(eigsh(A, M=M_sparse, k=5), sparse_ref)

        # M as LinearOperator
        assert_raises(TypeError, eigsh, A, M=M_linop, k=4)

        # Test 'A' for different types
        assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
        assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
    """For a real nonsymmetric A, check that the eigenvalues returned for k
    are a subset of those returned for k+1, for every which/sigma/dtype."""
    np.random.seed(1)

    n = 10
    # Sparse random matrix with entries shifted into [-1, 1).
    A = rand(n, n, density=0.5)
    A.data *= 2
    A.data -= 1

    v0 = np.ones(n)

    whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
    dtypes = [np.float32, np.float64]

    for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
        prev_w = np.array([], dtype=dtype)
        eps = np.finfo(dtype).eps
        for k in range(1, 9):
            w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
                        v0=v0.astype(dtype), tol=0)
            # Residual of the eigenvalue equation must be ~zero.
            assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))

            # Check that the set of eigenvalues for `k` is a subset of that for `k+1`
            dist = abs(prev_w[:,None] - w).min(axis=1)
            assert_allclose(dist, 0, atol=np.sqrt(eps))

            prev_w = w

            # Check sort order
            if sigma is None:
                d = w
            else:
                d = 1 / (w - sigma)

            if which == 'LM':
                # ARPACK is systematic for 'LM', but sort order
                # appears not well defined for other modes
                assert np.all(np.diff(abs(d)) <= 1e-6)
| {
"content_hash": "08157a4ac49e2dd9fb56fc5faf53ef9f",
"timestamp": "",
"source": "github",
"line_count": 711,
"max_line_length": 85,
"avg_line_length": 32.970464135021096,
"alnum_prop": 0.5418906236669226,
"repo_name": "e-q/scipy",
"id": "b45963cead79387433cce4e944b43dcc6e7404de",
"size": "23442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4399737"
},
{
"name": "C++",
"bytes": "649915"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368728"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12786221"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
"""
Copyright 2015-2017 Paderborn University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import json
import time
import os
import threading
from sonmanobase import messaging
# Module-wide logging: root handlers at INFO, this plugin logger at DEBUG.
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("son-mano-base:plugin")
LOG.setLevel(logging.DEBUG)
class ManoBasePlugin(object):
    """
    Abstract class that should be inherited by other MANO plugins.

    This class provides basic mechanisms to
    - connect to the broker
    - send/receive async/sync request/response calls
    - send/receive notifications
    - register / de-register plugin to plugin manager

    It also implements an automatic heartbeat mechanism that periodically sends
    heartbeat notifications.
    """

    def __init__(self,
                 name="son-plugin",
                 version=None,
                 description=None,
                 auto_register=True,
                 wait_for_registration=True,
                 auto_heartbeat_rate=0.5):
        """
        Performs plugin initialization steps, e.g., connection setup.

        NOTE(review): with the default run() implementation this constructor
        never returns, because it ends by calling self.run(), which loops
        forever.

        :param name: Plugin name prefix
        :param version: Plugin version
        :param description: A description string
        :param auto_register: Automatically register on init
        :param wait_for_registration: Wait for registration before returning from init
        :param auto_heartbeat_rate: rate of automatic heartbeat notifications 1/n seconds. 0=deactivated
        :return:
        """
        # Full plugin name, e.g. "son-plugin.MyPluginSubclass".
        self.name = "%s.%s" % (name, self.__class__.__name__)
        self.version = version
        self.description = description
        self.uuid = None  # uuid given by plugin manager on registration
        self.state = None  # the state of this plugin READY/RUNNING/PAUSED/FAILED
        LOG.info(
            "Starting MANO Plugin: %r ..." % self.name)
        # create and initialize broker connection
        self.manoconn = messaging.ManoBrokerRequestResponseConnection(self.name)
        # register subscriptions
        self.declare_subscriptions()
        # register to plugin manager
        if auto_register:
            self.register()
            if wait_for_registration:
                self._wait_for_registration()
        # add additional subscriptions (these topics contain self.uuid, so this
        # must happen after registration has assigned one)
        self._register_lifecycle_endpoints()
        # kick-off automatic heartbeat mechanism
        self._auto_heartbeat(auto_heartbeat_rate)
        # jump to run
        self.run()

    def __del__(self):
        """
        Actions done when plugin is destroyed.

        De-registers from the plugin manager and closes the broker connection.
        :return:
        """
        # de-register this plugin
        self.deregister()
        self.manoconn.stop_connection()
        del self.manoconn

    def _auto_heartbeat(self, rate):
        """
        A simple periodic heartbeat mechanism.
        (much room for improvements here)

        :param rate: rate of heartbeat notifications (heartbeats per second;
            e.g. 0.5 means one heartbeat every 2 seconds); <= 0 disables it
        :return:
        """
        if rate <= 0:
            return

        def run():
            # Loop forever; heartbeats start only once registration has
            # assigned a uuid.
            while True:
                if self.uuid is not None:
                    self._send_heartbeat()
                time.sleep(1/rate)

        # run heartbeats in separated thread
        t = threading.Thread(target=run)
        t.daemon = True  # don't keep the process alive for heartbeats
        t.start()

    def _send_heartbeat(self):
        # Notify on the plugin-specific heartbeat topic with uuid and state.
        self.manoconn.notify(
            "platform.management.plugin.%s.heartbeat" % str(self.uuid),
            json.dumps({"uuid": self.uuid,
                        "state": str(self.state)}))

    def declare_subscriptions(self):
        """
        Can be overwritten by subclass.
        But: this superclass method should be called in any case.
        """
        # plugin status update subscription
        self.manoconn.register_notification_endpoint(
            self.on_plugin_status_update,  # call back method
            "platform.management.plugin.status")

    def run(self):
        """
        To be overwritten by subclass.

        Default: block forever so the plugin process stays alive.
        """
        # go into infinity loop (we could do anything here)
        while True:
            time.sleep(1)

    def on_lifecycle_start(self, ch, method, properties, message):
        """
        To be overwritten by subclass
        """
        LOG.debug("Received lifecycle.start event.")
        self.state = "RUNNING"

    def on_lifecycle_pause(self, ch, method, properties, message):
        """
        To be overwritten by subclass
        """
        LOG.debug("Received lifecycle.pause event.")
        self.state = "PAUSED"

    def on_lifecycle_stop(self, ch, method, properties, message):
        """
        To be overwritten by subclass
        """
        LOG.debug("Received lifecycle.stop event.")
        self.deregister()
        # NOTE(review): os._exit() terminates immediately without running
        # cleanup handlers (atexit, finally blocks, __del__).
        os._exit(0)

    def on_registration_ok(self):
        """
        To be overwritten by subclass
        """
        LOG.debug("Received registration ok event.")
        pass

    def on_plugin_status_update(self, ch, method, properties, message):
        """
        To be overwritten by subclass.
        Called when a plugin list status update
        is received from the plugin manager.
        """
        LOG.debug("Received plugin status update %r." % str(message))

    def register(self):
        """
        Send a register request to the plugin manager component to announce this plugin.
        """
        message = {"name": self.name,
                   "version": self.version,
                   "description": self.description}
        # Async request; self.uuid is filled in by _on_register_response.
        self.manoconn.call_async(self._on_register_response,
                                 "platform.management.plugin.register",
                                 json.dumps(message))

    def _on_register_response(self, ch, method, props, response):
        """
        Event triggered when register response is received.

        :param props: response properties
        :param response: response body
        :return: None
        """
        # NOTE(review): assumes the broker delivers the body as a JSON
        # string; str() on a bytes payload would break parsing -- confirm.
        response = json.loads(str(response))
        if response.get("status") != "OK":
            LOG.debug("Response %r" % response)
            LOG.error("Plugin registration failed. Exit.")
            exit(1)
        self.uuid = response.get("uuid")
        # mark this plugin to be ready to be started
        self.state = "READY"
        LOG.info("Plugin registered with UUID: %r" % response.get("uuid"))
        # jump to on_registration_ok()
        self.on_registration_ok()
        self._send_heartbeat()

    def deregister(self):
        """
        Send a deregister event to the plugin manager component.
        """
        LOG.info("De-registering plugin...")
        message = {"uuid": self.uuid}
        self.manoconn.call_async(self._on_deregister_response,
                                 "platform.management.plugin.deregister",
                                 json.dumps(message))

    def _on_deregister_response(self, ch, method, props, response):
        """
        Event triggered when de-register response is received.

        :param props: response properties
        :param response: response body
        :return: None
        """
        response = json.loads(str(response))
        if response.get("status") != "OK":
            LOG.error("Plugin de-registration failed. Exit.")
            exit(1)
        LOG.info("Plugin de-registered.")

    def _wait_for_registration(self, timeout=5, sleep_interval=0.1):
        """
        Method to do active waiting until the registration is completed.
        (not nice, but ok for now)

        :param timeout: max wait in seconds
        :param sleep_interval: sleep interval in seconds
        :return: None
        """
        # FIXME: Use threading.Event() for this?
        c = 0
        LOG.debug("Waiting for registration (timeout=%d) ..." % timeout)
        # Poll until _on_register_response() sets self.uuid, or give up.
        while self.uuid is None and c < timeout:
            time.sleep(sleep_interval)
            c += sleep_interval

    def _register_lifecycle_endpoints(self):
        # Subscriptions are per-plugin (topics contain the uuid), so they can
        # only be created after a successful registration.
        if self.uuid is not None:
            # lifecycle.start
            self.manoconn.register_notification_endpoint(
                self.on_lifecycle_start,  # call back method
                "platform.management.plugin.%s.lifecycle.start" % str(self.uuid))
            # lifecycle.pause
            self.manoconn.register_notification_endpoint(
                self.on_lifecycle_pause,  # call back method
                "platform.management.plugin.%s.lifecycle.pause" % str(self.uuid))
            # lifecycle.stop
            self.manoconn.register_notification_endpoint(
                self.on_lifecycle_stop,  # call back method
                "platform.management.plugin.%s.lifecycle.stop" % str(self.uuid))
| {
"content_hash": "770228d85756d9e7dc91c6bf8e3e402c",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 104,
"avg_line_length": 35.10077519379845,
"alnum_prop": 0.5987190812720848,
"repo_name": "sonata-nfv/son-tests",
"id": "9db09b15e2bca03cf8f4bc0db1664b55f7c380f4",
"size": "9056",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "int-slm-infrabstractV1/test-cleaner/son-mano-base/sonmanobase/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "27764"
},
{
"name": "JavaScript",
"bytes": "29436"
},
{
"name": "Python",
"bytes": "262959"
},
{
"name": "Shell",
"bytes": "112803"
}
],
"symlink_target": ""
} |
"""
Algorithms for solving Parametric Risch Differential Equations.
The methods used for solving Parametric Risch Differential Equations parallel
those for solving Risch Differential Equations. See the outline in the
docstring of rde.py for more information.
The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in
K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such
that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist.
For the algorithms here G is a list of tuples of fractions of the terms on the
right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on
the right hand side of the equation (i.e., qi in k[t]). See the docstring of
each function for more information.
"""
from __future__ import print_function, division
from sympy.core import Dummy, ilcm, Add, Mul, Pow, S
from sympy.core.compatibility import reduce, range
from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,
bound_degree)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation,
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel,
recognize_log_derivative)
from sympy.matrices import zeros, eye
from sympy.polys import Poly, lcm, cancel, sqf_list
from sympy.polys.polymatrix import PolyMatrix as Matrix
from sympy.solvers import solve
def prde_normal_denom(fa, fd, G, DE):
    """
    Parametric Risch Differential Equation - Normal part of the denominator.

    Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
    normalized with respect to t, return the tuple (a, b, G, h) such that
    a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
    c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)),
    q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
    """
    # Split f's denominator into normal (dn) and special (ds) parts.
    dn, ds = splitfactor(fd, DE)
    Gas, Gds = list(zip(*G))
    # gd: lcm of the denominators of the gi; en is its normal part.
    gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))
    en, es = splitfactor(gd, DE)
    p = dn.gcd(en)
    # h: squarefree-derived factor of en not already accounted for by p.
    h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))
    a = dn*h
    c = a*h
    ba = a*fa - dn*derivation(h, DE)*fd
    ba, bd = ba.cancel(fd, include=True)
    # Multiply each gi by c == dn*h**2 to clear the normal denominators.
    G = [(c*A).cancel(D, include=True) for A, D in G]
    return (a, (ba, bd), G, h)
def real_imag(ba, bd, gen):
    """
    Helper function, to get the real and imaginary part of a rational function
    evaluated at sqrt(-1) without actually evaluating it at sqrt(-1).

    Separates the even and odd power terms by checking the degree of terms wrt
    mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is real part
    of the numerator ba[1] is the imaginary part and bd is the denominator
    of the rational function.
    """
    def split_at_I(poly_dict):
        # Substituting gen -> sqrt(-1): gen**d contributes I**d, which
        # cycles through 1, I, -1, -I as d % 4 takes 0, 1, 2, 3.
        re_part = 0
        im_part = 0
        for key, coeff in poly_dict.items():
            residue = key[0] % 4
            if residue == 0:
                re_part += coeff
            elif residue == 1:
                im_part += coeff
            elif residue == 2:
                re_part -= coeff
            else:
                im_part -= coeff
        return re_part, im_part

    denom_re, denom_im = split_at_I(bd.as_poly(gen).as_dict())
    num_re, num_im = split_at_I(ba.as_poly(gen).as_dict())
    # Multiply through by the conjugate of the denominator so the
    # resulting denominator is purely real.
    new_num_re = (num_re*denom_re + num_im*denom_im).as_poly(gen)
    new_num_im = (num_im*denom_re - num_re*denom_im).as_poly(gen)
    new_denom = (denom_re*denom_re + denom_im*denom_im).as_poly(gen)
    return (new_num_re, new_num_im, new_denom)
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
    """
    Parametric Risch Differential Equation - Special part of the denominator.

    case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
    hypertangent, and primitive cases, respectively. For the hyperexponential
    (resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
    b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
    k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
    gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
    k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
    Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
    k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).

    For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
    case.
    """
    # TODO: Merge this with the very similar special_denom() in rde.py
    if case == 'auto':
        case = DE.case

    # p is the special polynomial of the extension.
    if case == 'exp':
        p = Poly(DE.t, DE.t)
    elif case == 'tan':
        p = Poly(DE.t**2 + 1, DE.t)
    elif case in ['primitive', 'base']:
        # No special part in these cases: h == 1.
        B = ba.quo(bd)
        return (a, B, G, Poly(1, DE.t))
    else:
        raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
            "'base'}, not %s." % case)

    # Orders of b and of the right-hand sides at p.
    nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
    nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])
    n = min(0, nc - min(0, nb))

    if not nb:
        # Possible cancellation.
        if case == 'exp':
            dcoeff = DE.d.quo(Poly(DE.t, DE.t))
            with DecrementLevel(DE):  # We are guaranteed to not have problems,
                                      # because case != 'base'.
                alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
                etaa, etad = frac_in(dcoeff, DE.t)
                A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
                if A is not None:
                    Q, m, z = A
                    if Q == 1:
                        n = min(n, m)

        elif case == 'tan':
            dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
            with DecrementLevel(DE):  # We are guaranteed to not have problems,
                                      # because case != 'base'.
                # Evaluate b at t = sqrt(-1) via its real/imaginary parts.
                betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
                betad = alphad
                etaa, etad = frac_in(dcoeff, DE.t)
                if recognize_log_derivative(2*betaa, betad, DE):
                    A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
                    B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
                    if A is not None and B is not None:
                        Q, s, z = A
                        # TODO: Add test
                        if Q == 1:
                            n = min(n, s/2)

    N = max(0, -nb)
    pN = p**N
    pn = p**-n  # This is 1/h

    A = a*pN
    B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
    G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
    h = pn

    # (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
    return (A, B, G, h)
def prde_linear_constraints(a, b, G, DE):
    """
    Parametric Risch Differential Equation - Generate linear constraints on the constants.

    Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and
    G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
    matrix M with entries in k(t) such that for any solution c1, ..., cm in
    Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
    (c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
    a*Dp + b*p == Sum(ci*qi, (i, 1, m)).

    Because M has entries in k(t), and because Matrix doesn't play well with
    Poly, M will be a Matrix of Basic expressions.
    """
    m = len(G)

    Gns, Gds = list(zip(*G))
    # Common denominator of all the gi.
    d = reduce(lambda i, j: i.lcm(j), Gds)
    d = Poly(d, field=True)
    # For each gi, split gi*d into quotient (polynomial part) and remainder.
    Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]

    if not all([ri.is_zero for _, ri in Q]):
        # Row i of M holds the t**i coefficients of the remainders;
        # Mx == 0 expresses that the fractional parts cancel in Sum(ci*gi).
        N = max([ri.degree(DE.t) for _, ri in Q])
        M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i))
    else:
        M = Matrix(0, m, [])  # No constraints, return the empty matrix.

    qs, _ = list(zip(*Q))
    return (qs, M)
def poly_linear_constraints(p, d):
    """
    Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
    q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such
    that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible
    by d if and only if (c1, ..., cm) is a solution of Mx = 0, in
    which case the quotient is Sum(ci*qi, (i, 1, m)).
    """
    m = len(p)
    quotients, remainders = zip(*(pi.div(d) for pi in p))

    if all(rem.is_zero for rem in remainders):
        # Every pi is already divisible by d: no constraints.
        M = Matrix(0, m, [])
    else:
        # Row i of M holds the t**i coefficients of the remainders;
        # Mx == 0 says the combined remainder vanishes.
        deg = max(rem.degree() for rem in remainders)
        M = Matrix(deg + 1, m, lambda i, j: remainders[j].nth(i))

    return quotients, M
def constant_system(A, u, DE):
    """
    Generate a system for the constant solutions.

    Given a differential field (K, D) with constant field C = Const(K), a Matrix
    A, and a vector (Matrix) u with coefficients in K, returns the tuple
    (B, v), where B is a Matrix with coefficients in C and v is a vector
    (Matrix) such that either v has coefficients in C, in which case the
    solutions in C of Ax == u are exactly all the solutions of Bx == v,
    or v has a non-constant coefficient, in which case Ax == u has no
    constant solution.

    This algorithm is used both in solving parametric problems and in
    determining if an element a of K is a derivative of an element of K or the
    logarithmic derivative of a K-radical using the structure theorem approach.

    Because Poly does not play well with Matrix yet, this algorithm assumes that
    all matrix entries are Basic expressions.
    """
    if not A:
        # Empty system: nothing to do.
        return A, u
    Au = A.row_join(u)
    Au = Au.rref(simplify=cancel, normalize_last=False)[0]
    # Warning: This will NOT return correct results if cancel() cannot reduce
    # an identically zero expression to 0. The danger is that we might
    # incorrectly prove that an integral is nonelementary (such as
    # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).
    # But this is a limitation in computer algebra in general, and implicit
    # in the correctness of the Risch Algorithm is the computability of the
    # constant field (actually, this same correctness problem exists in any
    # algorithm that uses rref()).
    #
    # We therefore limit ourselves to constant fields that are computable
    # via the cancel() function, in order to prevent a speed bottleneck from
    # calling some more complex simplification function (rational function
    # coefficients will fall into this class). Furthermore, (I believe) this
    # problem will only crop up if the integral explicitly contains an
    # expression in the constant field that is identically zero, but cannot
    # be reduced to such by cancel(). Therefore, a careful user can avoid this
    # problem entirely by being careful with the sorts of expressions that
    # appear in his integrand in the variables other than the integration
    # variable (the structure theorems should be able to completely decide these
    # problems in the integration variable).
    Au = Au.applyfunc(cancel)
    A, u = Au[:, :-1], Au[:, -1]

    for j in range(A.cols):
        for i in range(A.rows):
            if A[i, j].has(*DE.T):
                # Entry is non-constant: differentiate the row to derive an
                # extra constant-coefficient constraint.
                # This assumes that const(F(t0, ..., tn) == const(K) == F
                Ri = A[i, :]
                # Rm+1; m = A.rows
                Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/
                        derivation(A[i, j], DE, basic=True))
                Rm1 = Rm1.applyfunc(cancel)
                um1 = cancel(derivation(u[i], DE, basic=True)/
                        derivation(A[i, j], DE, basic=True))

                for s in range(A.rows):
                    # A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
                    # Asj is captured before the row op mutates A[s, j].
                    Asj = A[s, j]
                    A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*u[m+1
                    u.row_op(s, lambda r, jj: cancel(r - Asj*um1))

                A = A.col_join(Rm1)
                u = u.col_join(Matrix([um1]))

    return (A, u)
def prde_spde(a, b, Q, n, DE):
    """
    Special Polynomial Differential Equation algorithm: Parametric Version.

    Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
    with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with
    Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
    c1, ..., cm in Const(k) and q in k[t] of degree at most n of
    a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
    degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))
    """
    # Solve b*ri + a*zi == qi for each qi (extended Euclidean algorithm).
    solutions = [gcdex_diophantine(b, a, qi) for qi in Q]
    remainders = [ri for ri, _ in solutions]
    cofactors = [zi for _, zi in solutions]

    new_b = b + derivation(a, DE)
    new_Q = [zi - derivation(ri, DE) for ri, zi in zip(remainders, cofactors)]

    # Degree bound drops by deg(a) at each iteration.
    return (a, new_b, new_Q, remainders, n - a.degree(DE.t))
def prde_no_cancel_b_large(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.

    Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
    b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
    h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
    if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
    Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
    d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    db = b.degree(DE.t)
    m = len(Q)
    H = [Poly(0, DE.t)]*m

    # Eliminate the leading coefficients degree by degree; b's leading
    # term dominates, so each step is a simple division.
    # NOTE: the entries of Q are rebound in place (caller's list mutates).
    for N in range(n, -1, -1):  # [n, ..., 0]
        for i in range(m):
            si = Q[i].nth(N + db)/b.LC()
            sitn = Poly(si*DE.t**N, DE.t)
            H[i] = H[i] + sitn
            Q[i] = Q[i] - derivation(sitn, DE) - b*sitn

    if all(qi.is_zero for qi in Q):
        dc = -1
        M = zeros(0, 2)
    else:
        # Remaining residuals must vanish: one constraint row per coefficient.
        dc = max([qi.degree(DE.t) for qi in Q])
        M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
    A, u = constant_system(M, zeros(dc + 1, 1), DE)
    c = eye(m)
    # [A 0; I -I]: forces di == ci in addition to the residual constraints.
    A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))

    return (H, A)
def prde_no_cancel_b_small(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough.

    Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
    deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
    h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
    if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
    Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where
    d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
    """
    m = len(Q)
    H = [Poly(0, DE.t)]*m

    # Here D's leading term dominates, so divide by N*lc(D) at each degree.
    for N in range(n, 0, -1):  # [n, ..., 1]
        for i in range(m):
            si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC())
            sitn = Poly(si*DE.t**N, DE.t)
            H[i] = H[i] + sitn
            Q[i] = Q[i] - derivation(sitn, DE) - b*sitn

    if b.degree(DE.t) > 0:
        # Constant term of q is determined by b's leading coefficient.
        for i in range(m):
            si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t)
            H[i] = H[i] + si
            Q[i] = Q[i] - derivation(si, DE) - b*si
        if all(qi.is_zero for qi in Q):
            dc = -1
            M = Matrix()
        else:
            dc = max([qi.degree(DE.t) for qi in Q])
            M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
        A, u = constant_system(M, zeros(dc + 1, 1), DE)
        c = eye(m)
        # [A 0; I -I]: forces di == ci plus the residual constraints.
        A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
        return (H, A)

    # else: b is in k, deg(qi) < deg(Dt)

    t = DE.t
    if DE.case != 'base':
        # Reduce the constant-term problem to a parametric RDE over k.
        with DecrementLevel(DE):
            t0 = DE.t  # k = k0(t0)
            ba, bd = frac_in(b, t0, field=True)
            Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]
            f, B = param_rischDE(ba, bd, Q0, DE)

            # f = [f1, ..., fr] in k^r and B is a matrix with
            # m + r columns and entries in Const(k) = Const(k0)
            # such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has
            # a solution y0 in k with c1, ..., cm in Const(k)
            # if and only y0 = Sum(dj*fj, (j, 1, r)) where
            # d1, ..., dr ar in Const(k) and
            # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.

        # Transform fractions (fa, fd) in f into constant
        # polynomials fa/fd in k[t].
        # (Is there a better way?)
        f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True)
             for fa, fd in f]
    else:
        # Base case. Dy == 0 for all y in k and b == 0.
        # Dy + b*y = Sum(ci*qi) is solvable if and only if
        # Sum(ci*qi) == 0 in which case the solutions are
        # y = d1*f1 for f1 = 1 and any d1 in Const(k) = k.

        f = [Poly(1, t, field=True)]  # r = 1
        B = Matrix([[qi.TC() for qi in Q] + [S.Zero]])
        # The condition for solvability is
        # B*Matrix([c1, ..., cm, d1]) == 0
        # There are no constraints on d1.

    # Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero.
    d = max([qi.degree(DE.t) for qi in Q])

    if d > 0:
        M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1))
        A, _ = constant_system(M, zeros(d, 1), DE)
    else:
        # No constraints on the hj.
        A = Matrix(0, m, [])

    # Solutions of the original equation are
    # y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)),
    # where ei == ci (i = 1, ..., m), when
    # A*Matrix([c1, ..., cm]) == 0 and
    # B*Matrix([c1, ..., cm, d1, ..., dr]) == 0

    # Build combined constraint matrix with m + r + m columns.

    r = len(f)
    I = eye(m)
    A = A.row_join(zeros(A.rows, r + m))
    B = B.row_join(zeros(B.rows, m))
    C = I.row_join(zeros(m, r)).row_join(-I)

    return f + H, A.col_join(B).col_join(C)
def prde_cancel_liouvillian(b, Q, n, DE):
    """
    Parametric Poly Risch Differential Equation - cancellation, Liouvillian
    (primitive/hyperexponential) cases. Pg, 237.
    """
    H = []

    # Why use DecrementLevel? Below line answers that:
    # Assuming that we can solve such problems over 'k' (not k[t])
    if DE.case == 'primitive':
        with DecrementLevel(DE):
            ba, bd = frac_in(b, DE.t, field=True)

    # Work down the coefficients of t**i, solving a parametric RDE over k
    # for each one.
    for i in range(n, -1, -1):
        if DE.case == 'exp':  # this re-checking can be avoided
            with DecrementLevel(DE):
                ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t,
                                DE.t, field=True)
        with DecrementLevel(DE):
            Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]
            fi, Ai = param_rischDE(ba, bd, Qy, DE)
        # Lift the solutions back to (constant) polynomials in k[t].
        fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True)
              for fa, fd in fi]

        ri = len(fi)

        if i == n:
            M = Ai
        else:
            # Stack the new constraints above the old ones, padding the old
            # block with zero columns for the new unknowns.
            M = Ai.col_join(M.row_join(zeros(M.rows, ri)))

        Fi, hi = [None]*ri, [None]*ri

        # from eq. on top of p.238 (unnumbered)
        for j in range(ri):
            hji = fi[j]*DE.t**i
            hi[j] = hji
            # building up Sum(djn*(D(fjn*t^n) - b*fjnt^n))
            Fi[j] = -(derivation(hji, DE) - b*hji)

        H += hi
        # in the next loop instead of Q it has
        # to be Q + Fi taking its place
        Q = Q + Fi

    return (H, M)
def param_poly_rischDE(a, b, q, n, DE):
    """Polynomial solutions of a parametric Risch differential equation.

    Given a derivation D in k[t], a, b in k[t] relatively prime, and q
    = [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and
    a matrix A with m + r columns and entries in Const(k) such that
    a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n
    in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj,
    (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
    d1, ..., dr) is a solution of Ax == 0.
    """
    m = len(q)

    if n < 0:
        # Only the trivial zero solution is possible.
        # Find relations between the qi.
        if all([qi.is_zero for qi in q]):
            return [], zeros(1, m)  # No constraints.

        N = max([qi.degree(DE.t) for qi in q])
        M = Matrix(N + 1, m, lambda i, j: q[j].nth(i))
        A, _ = constant_system(M, zeros(M.rows, 1), DE)

        return [], A

    if a.is_ground:
        # Normalization: a = 1.
        a = a.LC()
        b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q]

        # Dispatch on the degree of b relative to D (Bronstein's case split).
        if not b.is_zero and (DE.case == 'base' or
                b.degree() > max(0, DE.d.degree() - 1)):
            return prde_no_cancel_b_large(b, q, n, DE)

        elif ((b.is_zero or b.degree() < DE.d.degree() - 1)
                and (DE.case == 'base' or DE.d.degree() >= 2)):
            return prde_no_cancel_b_small(b, q, n, DE)

        elif (DE.d.degree() >= 2 and
                b.degree() == DE.d.degree() - 1 and
                n > -b.as_poly().LC()/DE.d.as_poly().LC()):
            raise NotImplementedError("prde_no_cancel_b_equal() is "
                "not yet implemented.")

        else:
            # Liouvillian cases
            if DE.case == 'primitive' or DE.case == 'exp':
                return prde_cancel_liouvillian(b, q, n, DE)
            else:
                raise NotImplementedError("non-linear and hypertangent "
                        "cases have not yet been implemented")

    # else: deg(a) > 0

    # Iterate SPDE as long as possible cumulating coefficient
    # and terms for the recovery of original solutions.
    alpha, beta = 1, [0]*m
    while n >= 0:  # and a, b relatively prime
        a, b, q, r, n = prde_spde(a, b, q, n, DE)
        beta = [betai + alpha*ri for betai, ri in zip(beta, r)]
        alpha *= a
        # Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to
        # solutions alpha*p + Sum(ci*betai) of the initial equation.
        d = a.gcd(b)
        if not d.is_ground:
            break

    # a*Dp + b*p = Sum(ci*qi) may have a polynomial solution
    # only if the sum is divisible by d.

    qq, M = poly_linear_constraints(q, d)
    # qq = [qq1, ..., qqm] where qqi = qi.quo(d).
    # M is a matrix with m columns an entries in k.
    # Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is
    # divisible by d if and only if M*Matrix([f1, ..., fm]) == 0,
    # in which case the quotient is Sum(fi*qqi).

    A, _ = constant_system(M, zeros(M.rows, 1), DE)
    # A is a matrix with m columns and entries in Const(k).
    # Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero
    # for c1, ..., cm in Const(k) if and only if
    # A*Matrix([c1, ...,cm]) == 0.

    V = A.nullspace()
    # V = [v1, ..., vu] where each vj is a column matrix with
    # entries aj1, ..., ajm in Const(k).
    # Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).
    # Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji)
    # (i = 1, ..., m) for some d1, ..., du in Const(k).
    # In that case, solutions of
    # a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi))
    # are the same as those of
    # (a/d)*Dp + (b/d)*p = Sum(dj*rj)
    # where rj = Sum(aji*qqi).

    if not V:  # No non-trivial solution.
        return [], eye(m)  # Could return A, but this has
                           # the minimum number of rows.

    Mqq = Matrix([qq])  # A single row.
    r = [(Mqq*vj)[0] for vj in V]  # [r1, ..., ru]

    # Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to
    # solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial
    # equation. These are equal to alpha*p + Sum(dj*fj) where
    # fj = Sum(aji*betai).
    Mbeta = Matrix([beta])
    f = [(Mbeta*vj)[0] for vj in V]  # [f1, ..., fu]

    #
    # Solve the reduced equation recursively.
    #
    g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE)

    # g = [g1, ..., gv] in k[t]^v and and B is a matrix with u + v
    # columns and entries in Const(k) such that
    # (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n
    # in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in
    # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
    # The solutions of the original equation are then
    # Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).

    # Collect solution components.
    h = f + [alpha*gk for gk in g]

    # Build combined relation matrix.
    A = -eye(m)
    for vj in V:
        A = A.row_join(vj)
    A = A.row_join(zeros(m, len(g)))
    A = A.col_join(zeros(B.rows, m).row_join(B))

    return h, A
def param_rischDE(fa, fd, G, DE):
    """
    Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).

    Given a derivation D in k(t), f in k(t), and G
    = [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
    a matrix A with m + r columns and entries in Const(k) such that
    Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
    in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
    (j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
    d1, ..., dr) is a solution of Ax == 0.

    Elements of k(t) are tuples (a, d) with a and d in k[t].
    """
    m = len(G)
    # Step 1: weak normalization.  Solutions of the weakly normalized
    # equation Dz + f*z = q*Sum(ci*Gi) correspond to solutions y = z/q of
    # the original equation.
    q, (fa, fd) = weak_normalizer(fa, fd, DE)
    # gamma accumulates the product of all denominators removed along the
    # way; the final answer is divided by it.
    gamma = q
    G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
    # Step 2: normal part of the denominator.  Solutions q in k<t> of
    # a*Dq + b*q = Sum(ci*Gi) correspond to solutions z = q/hn of the
    # weakly normalized equation.
    a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
    gamma *= hn
    # Step 3: special part of the denominator.  Solutions p in k[t] of
    # A*Dp + B*p = Sum(ci*Gi) correspond to solutions q = p/hs of the
    # previous equation.
    A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
    gamma *= hs
    # Remove the common factor of A and B before proceeding.
    g = A.gcd(B)
    a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
        gia, gid in G]
    # a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
    # only if the sum is in k[t].
    q, M = prde_linear_constraints(a, b, g, DE)
    # q = [q1, ..., qm] where qi in k[t] is the polynomial component
    # of the partial fraction expansion of gi.
    # M is a matrix with m columns and entries in k.
    # Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
    # is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
    # in which case the sum is equal to Sum(fi*qi).
    M, _ = constant_system(M, zeros(M.rows, 1), DE)
    # M is a matrix with m columns and entries in Const(k).
    # Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
    # if and only if M*Matrix([c1, ..., cm]) == 0,
    # in which case the sum is Sum(ci*qi).
    ## Reduce number of constants at this point
    V = M.nullspace()
    # V = [v1, ..., vu] where each vj is a column matrix with
    # entries aj1, ..., ajm in Const(k).
    # Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
    # Sum(ci*gi) is in k[t] if and only if ci = Sum(dj*aji)
    # (i = 1, ..., m) for some d1, ..., du in Const(k).
    # In that case,
    # Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
    # where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
    if not V:  # No non-trivial solution
        return [], eye(m)
    Mq = Matrix([q])  # A single row.
    r = [(Mq*vj)[0] for vj in V]  # [r1, ..., ru]
    # Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
    # y = p/gamma of the initial equation with ci = Sum(dj*aji).
    try:
        # We try n=5. At least for prde_spde, it will always
        # terminate no matter what n is.
        n = bound_degree(a, b, r, DE, parametric=True)
    except NotImplementedError:
        # A temporary bound is set. Eventually, it will be removed.
        # the currently added test case takes large time
        # even with n=5, and much longer with large n's.
        n = 5
    h, B = param_poly_rischDE(a, b, r, n, DE)
    # h = [h1, ..., hv] in k[t]^v and B is a matrix with u + v
    # columns and entries in Const(k) such that
    # a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
    # in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
    # Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
    # The solutions of the original equation for ci = Sum(dj*aji)
    # (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
    ## Build combined relation matrix with m + u + v columns.
    A = -eye(m)
    for vj in V:
        A = A.row_join(vj)
    A = A.row_join(zeros(m, len(h)))
    A = A.col_join(zeros(B.rows, m).row_join(B))
    ## Eliminate d1, ..., du.
    W = A.nullspace()
    # W = [w1, ..., wt] where each wl is a column matrix with
    # entries blk (k = 1, ..., m + u + v) in Const(k).
    # The vectors (bl1, ..., blm) generate the space of those
    # constant families (c1, ..., cm) for which a solution of
    # the equation Dy + f*y == Sum(ci*Gi) exists. They generate
    # the space and form a basis except possibly when Dy + f*y == 0
    # is solvable in k(t). The corresponding solutions are
    # y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
    v = len(h)
    M = Matrix([wl[:m] + wl[-v:] for wl in W])  # excise dj's.
    N = M.nullspace()
    # N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
    # vectors generating the space of linear relations between
    # c1, ..., cm, e1, ..., ev.
    C = Matrix([ni[:] for ni in N])  # rows n1, ..., ns.
    return [hk.cancel(gamma, include=True) for hk in h], C
def limited_integrate_reduce(fa, fd, G, DE):
    """
    Simpler version of step 1 & 2 for the limited integration problem.

    Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
    (a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
    g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
    c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and
    p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore,
    if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
    over k, then deg(p) <= N.

    So that the special part is always computed, this function calls the more
    general prde_special_denom() automatically if it cannot determine that
    S1irr == Sirr. Furthermore, it will automatically call bound_degree() when
    t is linear and non-Liouvillian, which for the transcendental case, implies
    that Dt == a*t + b with for some a, b in k*.
    """
    # Split each denominator into its normal and special parts.
    dn, ds = splitfactor(fd, DE)
    E = [splitfactor(gd, DE) for _, gd in G]
    En, Es = list(zip(*E))
    c = reduce(lambda i, j: i.lcm(j), (dn,) + En)  # lcm(dn, en1, ..., enm)
    hn = c.gcd(c.diff(DE.t))
    a = hn
    b = -derivation(hn, DE)
    N = 0
    # These are the cases where we know that S1irr = Sirr, but there could be
    # others, and this algorithm will need to be extended to handle them.
    if DE.case in ['base', 'primitive', 'exp', 'tan']:
        hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es)  # lcm(ds, es1, ..., esm)
        a = hn*hs
        b -= (hn*derivation(hs, DE)).quo(hs)
        mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for
            ga, gd in G]))
        # So far, all the above are also nonlinear or Liouvillian, but if this
        # changes, then this will need to be updated to call bound_degree()
        # as per the docstring of this function (DE.case == 'other_linear').
        N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu)
    else:
        # TODO: implement this
        raise NotImplementedError
    # Note: the third returned element (h in the docstring) equals a here.
    V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
    return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V)
def limited_integrate(fa, fd, G, DE):
    """
    Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n))

    Returns (Y, C) where Y == (Ya, Yd) represents v as a quotient of
    polynomials in k[t] with monic denominator and C is the list of
    constants c1, ..., cm, or None if no solution exists.
    """
    # Normalize so that the denominator of f is monic.
    fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
    # interpreting limited integration problem as a
    # parametric Risch DE problem
    Fa = Poly(0, DE.t)
    Fd = Poly(1, DE.t)
    G = [(fa, fd)] + G
    h, A = param_rischDE(Fa, Fd, G, DE)
    V = A.nullspace()
    # Only vectors with a non-zero first component correspond to solutions
    # that actually involve f.
    V = [v for v in V if v[0] != 0]
    if not V:
        return None
    else:
        # we can take any vector from V, we take V[0]
        c0 = V[0][0]
        # v = [-1, c1, ..., cm, d1, ..., dr]
        v = V[0]/(-c0)
        r = len(h)
        m = len(v) - r - 1
        C = list(v[1: m + 1])
        # Assemble v = -Sum(di*hi) from the trailing coefficients of v.
        y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \
                for i in range(r)])
        y_num, y_den = y.as_numer_denom()
        Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t)
        # Return with a monic denominator, matching the normalization above.
        Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic()
        return Y, C
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
    """
    Parametric logarithmic derivative heuristic.

    Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
    theta over k(t), raises either NotImplementedError, in which case the
    heuristic failed, or returns None, in which case it has proven that no
    solution exists, or returns a solution (n, m, v) of the equation
    n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.

    If this heuristic fails, the structure theorem approach will need to be
    used.

    The argument w == Dtheta/theta
    """
    # TODO: finish writing this and write tests
    c1 = c1 or Dummy('c1')
    # Polynomial and proper-fraction parts of f and w.
    p, a = fa.div(fd)
    q, b = wa.div(wd)
    B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
    C = max(p.degree(DE.t), q.degree(DE.t))
    if q.degree(DE.t) > B:
        # Match the coefficients of p and c1*q above degree B and solve
        # for the single rational unknown c1.
        eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)]
        s = solve(eqs, c1)
        if not s or not s[c1].is_Rational:
            # deg(q) > B, no solution for c.
            return None
        M, N = s[c1].as_numer_denom()
        nfmwa = N*fa*wd - M*wa*fd
        nfmwd = fd*wd
        Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE,
            'auto')
        if Qv is None:
            # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
            return None
        Q, v = Qv
        if Q.is_zero or v.is_zero:
            return None
        return (Q*N, Q*M, v)
    if p.degree(DE.t) > B:
        return None
    c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
    l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
    ln, ls = splitfactor(l, DE)
    z = ls*ln.gcd(ln.diff(DE.t))
    if not z.has(DE.t):
        # TODO: We treat this as 'no solution', until the structure
        # theorem version of parametric_log_deriv is implemented.
        return None
    u1, r1 = (fa*l.quo(fd)).div(z)  # (l*f).div(z)
    u2, r2 = (wa*l.quo(wd)).div(z)  # (l*w).div(z)
    # Match the remainders coefficient-wise and solve for c1 again.
    eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))]
    s = solve(eqs, c1)
    if not s or not s[c1].is_Rational:
        # deg(q) <= B, no solution for c.
        return None
    M, N = s[c1].as_numer_denom()
    nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd
    nfmwd = fd*wd
    Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
    if Qv is None:
        # (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
        return None
    Q, v = Qv
    if Q.is_zero or v.is_zero:
        return None
    return (Q*N, Q*M, v)
def parametric_log_deriv(fa, fd, wa, wd, DE):
    """
    Solve the parametric logarithmic derivative problem.

    Currently a thin wrapper that delegates to the heuristic
    parametric_log_deriv_heu(); see its docstring for the meaning of the
    arguments and of the return value.  Any NotImplementedError raised by
    the heuristic propagates to the caller.
    """
    # TODO: Write the full algorithm using the structure theorems and fall
    # back to it when the heuristic raises NotImplementedError.  This could
    # be implemented more efficiently, but it isn't too worrisome, because
    # the heuristic handles most difficult cases.
    return parametric_log_deriv_heu(fa, fd, wa, wd, DE)
def is_deriv_k(fa, fd, DE):
    r"""
    Checks if Df/f is the derivative of an element of k(t).

    a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
    such that a = Db. Either returns (ans, u), such that Df/f == Du, or None,
    which means that Df/f is not the derivative of an element of k(t). ans is
    a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful
    for seeing exactly which elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df/f is the derivative of a element of K if and only if there are ri
    in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i      Df
            /     i     i     /     i   ---   =  --.
            ---               ---        t        f
         i in L            i in E        i
            K/C(x)            K/C(x)

    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
    in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
    monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
    is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
    a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
    hyperexponential monomials of K over C(x)). If K is an elementary extension
    over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
    transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
    Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
    deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
    and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function. Therefore, it is required to pass
    them as indices to D (or T). E_args are the arguments of the
    hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
    exp(E_args[i])). This is needed to compute the final answer u such that
    Df/f == Du.

    log(f) will be the same as u up to a additive constant. This is because
    they will both behave the same as monomials. For example, both log(x) and
    log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
    Therefore, the term const is returned. const is such that
    log(const) + f == u. This is calculated by dividing the arguments of one
    logarithm from the other. Therefore, it is necessary to pass the arguments
    of the logarithmic terms in L_args.

    To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().

    See also
    ========
    is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical
    """
    # Compute Df/f == (fd*Dfa - fa*Dfd)/(fd*fa) by the quotient rule.
    dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa
    dfa, dfd = dfa.cancel(dfd, include=True)
    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.exts) != len(DE.D):
        if [i for i in DE.cases if i == 'tan'] or \
                (set([i for i in DE.cases if i == 'primitive']) -
                    set(DE.indices('log'))):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")
        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")
    # Dt_i/t_i for hyperexponential monomials and Dt_i for logarithmic ones.
    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
    L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
    A, u = constant_system(lhs, rhs, DE)
    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system
        # Also note: derivation(basic=True) calls cancel()
        return None
    else:
        if not all(i.is_Rational for i in u):
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            terms = ([DE.extargs[i] for i in DE.indices('exp')] +
                    [DE.T[i] for i in DE.indices('log')])
            ans = list(zip(terms, u))
            result = Add(*[Mul(i, j) for i, j in ans])
            argterms = ([DE.T[i] for i in DE.indices('exp')] +
                    [DE.extargs[i] for i in DE.indices('log')])
            l = []
            ld = []
            for i, j in zip(argterms, u):
                # We need to get around things like sqrt(x**2) != x
                # and also sqrt(x**2 + 2*x + 1) != x + 1
                # Issue 10798: i need not be a polynomial
                i, d = i.as_numer_denom()
                icoeff, iterms = sqf_list(i)
                l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
                dcoeff, dterms = sqf_list(d)
                ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms])))
            const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))
            return (ans, result, const)
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
    r"""
    Checks if Df is the logarithmic derivative of a k(t)-radical.

    b in k(t) can be written as the logarithmic derivative of a k(t) radical if
    there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
    Either returns (ans, u, n, const) or None, which means that Df cannot be
    written as the logarithmic derivative of a k(t)-radical. ans is a list of
    tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
    seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df is the logarithmic derivative of a K-radical if and only if there
    are ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i
            /     i     i     /     i   ---   =  Df.
            ---               ---        t
         i in L            i in E        i
            K/C(x)            K/C(x)

    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
    in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
    monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
    is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
    a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
    hyperexponential monomials of K over C(x)). If K is an elementary extension
    over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
    transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
    Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
    deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
    and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function. Therefore, it is required to pass
    them as indices to D (or T). L_args are the arguments of the logarithms
    indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
    needed to compute the final answer u such that n*f == Du/u.

    exp(f) will be the same as u up to a multiplicative constant. This is
    because they will both behave the same as monomials. For example, both
    exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
    is returned. const is such that exp(const)*f == u. This is calculated by
    subtracting the arguments of one exponential from the other. Therefore, it
    is necessary to pass the arguments of the exponential terms in E_args.

    To handle the case where we are given Df, not f, use
    is_log_deriv_k_t_radical_in_field().

    See also
    ========
    is_log_deriv_k_t_radical_in_field, is_deriv_k
    """
    if Df:
        # Compute Df == (fd*Dfa - fa*Dfd)/fd**2 by the quotient rule.
        dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
            include=True)
    else:
        dfa, dfd = fa, fd
    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.exts) != len(DE.D):
        if [i for i in DE.cases if i == 'tan'] or \
                (set([i for i in DE.cases if i == 'primitive']) -
                    set(DE.indices('log'))):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")
        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")
    # Dt_i/t_i for hyperexponential monomials and Dt_i for logarithmic ones.
    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
    L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
    A, u = constant_system(lhs, rhs, DE)
    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system
        # Also note: derivation(basic=True) calls cancel()
        return None
    else:
        if not all(i.is_Rational for i in u):
            # TODO: But maybe we can tell if they're not rational, like
            # log(2)/log(3). Also, there should be an option to continue
            # anyway, even if the result might potentially be wrong.
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            # Clear denominators so that the exponents n*ri are integers.
            n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
            u *= n
            terms = ([DE.T[i] for i in DE.indices('exp')] +
                    [DE.extargs[i] for i in DE.indices('log')])
            ans = list(zip(terms, u))
            result = Mul(*[Pow(i, j) for i, j in ans])
            # exp(f) will be the same as result up to a multiplicative
            # constant. We now find the log of that constant.
            argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
                    [DE.T[i] for i in DE.indices('log')])
            const = cancel(fa.as_expr()/fd.as_expr() -
                Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
            return (ans, result, n, const)
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
    """
    Checks if f can be written as the logarithmic derivative of a k(t)-radical.

    It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)
    for any given fa, fd, DE in that it finds the solution in the
    given field not in some (possibly unspecified extension) and
    "in_field" with the function name is used to indicate that.

    f in k(t) can be written as the logarithmic derivative of a k(t) radical if
    there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
    Either returns (n, u) or None, which means that f cannot be written as the
    logarithmic derivative of a k(t)-radical.

    case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
    hyperexponential, and hypertangent cases, respectively. If case is 'auto',
    it will attempt to determine the type of the derivation automatically.

    See also
    ========
    is_log_deriv_k_t_radical, is_deriv_k
    """
    fa, fd = fa.cancel(fd, include=True)
    # f must be simple
    n, s = splitfactor(fd, DE)
    if not s.is_one:
        # NOTE(review): this branch deliberately does nothing, although the
        # comment above says f must be simple.  It looks like a `return None`
        # (or an error) was intended when the special part s is non-trivial
        # -- TODO confirm before changing behavior.
        pass
    z = z or Dummy('z')
    H, b = residue_reduce(fa, fd, DE, z=z)
    if not b:
        # I will have to verify, but I believe that the answer should be
        # None in this case. This should never happen for the
        # functions given when solving the parametric logarithmic
        # derivative problem when integration elementary functions (see
        # Bronstein's book, page 255), so most likely this indicates a bug.
        return None
    roots = [(i, i.real_roots()) for i, _ in H]
    if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
            i, j in roots):
        # If f is the logarithmic derivative of a k(t)-radical, then all the
        # roots of the resultant must be rational numbers.
        return None
    # [(a, i), ...], where i*log(a) is a term in the log-part of the integral
    # of f
    respolys, residues = list(zip(*roots)) or [[], []]
    # Note: this might be empty, but everything below should work fine in that
    # case (it should be the same as if it were [[1, 1]])
    residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
        i in residues[j]]
    # TODO: finish writing this and write tests
    p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
    p = p.as_poly(DE.t)
    if p is None:
        # f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
        return None
    if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
        return None
    if case == 'auto':
        case = DE.case
    if case == 'exp':
        wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
        with DecrementLevel(DE):
            pa, pd = frac_in(p, DE.t, cancel=True)
            wa, wd = frac_in((wa, wd), DE.t)
            A = parametric_log_deriv(pa, pd, wa, wd, DE)
        if A is None:
            return None
        n, e, u = A
        u *= DE.t**e
    elif case == 'primitive':
        with DecrementLevel(DE):
            pa, pd = frac_in(p, DE.t)
            A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
        if A is None:
            return None
        n, u = A
    elif case == 'base':
        # TODO: we can use more efficient residue reduction from ratint()
        if not fd.is_sqf or fa.degree() >= fd.degree():
            # f is the logarithmic derivative in the base case if and only if
            # f = fa/fd, fd is square-free, deg(fa) < deg(fd), and
            # gcd(fa, fd) == 1. The last condition is handled by cancel() above.
            return None
        # Note: if residueterms = [], returns (1, 1)
        # f had better be 0 in that case.
        n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S.One)
        u = Mul(*[Pow(i, j*n) for i, j in residueterms])
        return (n, u)
    elif case == 'tan':
        raise NotImplementedError("The hypertangent case is "
            "not yet implemented for is_log_deriv_k_t_radical_in_field()")
    elif case in ['other_linear', 'other_nonlinear']:
        # XXX: If these are supported by the structure theorems, change to NotImplementedError.
        raise ValueError("The %s case is not supported in this function." % case)
    else:
        raise ValueError("case must be one of {'primitive', 'exp', 'tan', "
            "'base', 'auto'}, not %s" % case)
    # Combine the residue contributions with the recursive result, scaling
    # everything to a common denominator of the exponents.
    common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in
        residueterms]] + [n], S.One)
    residueterms = [(i, j*common_denom) for i, j in residueterms]
    m = common_denom//n
    if common_denom != n*m:  # Verify exact division
        raise ValueError("Inexact division")
    u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms]))
    return (common_denom, u)
| {
"content_hash": "e28b9b5c66ad1e47b20221bac2d1beae",
"timestamp": "",
"source": "github",
"line_count": 1273,
"max_line_length": 110,
"avg_line_length": 40.180675569520815,
"alnum_prop": 0.5523949169110459,
"repo_name": "kaushik94/sympy",
"id": "d1b130cdf33782e411ebef4753c38d10fa26581b",
"size": "51150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/integrals/prde.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from poppy.model.helpers import provider_details
def load_from_json(json_data):
    """Build a ProviderDetail model from a JSON dictionary.

    Missing keys default to None via dict.get().
    """
    return provider_details.ProviderDetail(
        provider_service_id=json_data.get("id"),
        access_urls=json_data.get("access_urls"),
        status=json_data.get("status"),
        error_info=json_data.get("error_info"))
| {
"content_hash": "192b8f49cf803385d7dc52ab4d432c55",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 48,
"avg_line_length": 33.76923076923077,
"alnum_prop": 0.6765375854214123,
"repo_name": "obulpathi/poppy",
"id": "55f21cb8ce8b32f4845d50ea4699ebd39a353bf3",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poppy/transport/pecan/models/request/provider_details.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1281"
},
{
"name": "PigLatin",
"bytes": "808"
},
{
"name": "Python",
"bytes": "1265113"
},
{
"name": "Shell",
"bytes": "12042"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class ResourceMgtConfig(AppConfig):
    """Django application configuration for the resource management app."""
    name = 'tbpc.resource_mgt'
| {
"content_hash": "d43149e649ddf8ba8aebb0f9d8b9ffb1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 35,
"avg_line_length": 20.6,
"alnum_prop": 0.7669902912621359,
"repo_name": "mpdevilleres/tbpc_app",
"id": "d6ab6a84dc235c281a518b08ad2b978ef1a5c5e4",
"size": "103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tbpc/resource_mgt/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "97568"
},
{
"name": "Nginx",
"bytes": "1096"
},
{
"name": "Python",
"bytes": "233826"
},
{
"name": "Shell",
"bytes": "8861"
}
],
"symlink_target": ""
} |
from django.urls import path
from web.backend import views
from django.views.decorators.csrf import csrf_exempt
# URL namespace, e.g. reverse("backend:tasklog", ...).
app_name = "backend"
urlpatterns = [
    path('', views.index, name='index'),
    # task log
    path("tasklog/<int:task_id>", views.tasklog, name="tasklog"),
    # task debug log
    path("debuglog/<int:task_id>", views.debuglog, name="debuglog"),
    # download debug log
    path("downloadlog/<int:task_id>", views.downloadlog, name="downloadlog"),
    # upload log; csrf_exempt so external clients can POST without a CSRF
    # token -- NOTE(review): confirm the view does its own authentication.
    path("uploadlog", csrf_exempt(views.uploadlog), name="uploadlog")
]
| {
"content_hash": "c88637f0c4322150363754eddd74c928",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 27.9,
"alnum_prop": 0.6810035842293907,
"repo_name": "LoRexxar/Cobra-W",
"id": "7a4686f6153a6cfe80934eb92ba66bc5315c3546",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/backend/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "204"
},
{
"name": "Hack",
"bytes": "82"
},
{
"name": "Java",
"bytes": "45"
},
{
"name": "PHP",
"bytes": "6172"
},
{
"name": "Python",
"bytes": "441482"
}
],
"symlink_target": ""
} |
import sys
import os
import re
import subprocess
from setuptools import setup, find_packages, Command
from distutils.command.build import build as BuildCommand
from setuptools.command.install import install as InstallCommand
from distutils.command.clean import clean as CleanCommand
from setuptools.command.test import test as TestCommand
import distutils
# Absolute paths resolved relative to this setup.py so the build works
# regardless of the current working directory.
ROOT_PATH = os.path.dirname(
    os.path.realpath(__file__)
)

SOURCE_PATH = os.path.join(
    ROOT_PATH, 'source'
)

RESOURCE_PATH = os.path.join(
    ROOT_PATH, 'resource'
)

# Generated module that pyside-rcc writes compiled Qt resources into.
RESOURCE_TARGET_PATH = os.path.join(
    SOURCE_PATH, 'riffle', 'resource.py'
)

README_PATH = os.path.join(ROOT_PATH, 'README.rst')

# Read the Docs sets READTHEDOCS; PySide is unavailable there, so resource
# compilation and PySide requirements are skipped in that environment.
ON_READ_THE_DOCS = os.environ.get('READTHEDOCS', None) == 'True'
# Custom commands.
class BuildResources(Command):
    '''Build additional resources.'''

    user_options = []

    def initialize_options(self):
        '''Configure default options.'''

    def finalize_options(self):
        '''Finalize options to be used.'''
        self.resource_source_path = os.path.join(RESOURCE_PATH, 'resource.qrc')
        self.resource_target_path = RESOURCE_TARGET_PATH

    def run(self):
        '''Run build.

        Compiles the Qt resource file into a Python module using pyside-rcc.
        Exits via SystemExit if the compiler cannot be run.
        '''
        if ON_READ_THE_DOCS:
            # PySide not available.
            return

        try:
            pyside_rcc_command = 'pyside-rcc'

            # On Windows, pyside-rcc is not automatically available on the
            # PATH so try to find it manually.
            if sys.platform == 'win32':
                import PySide
                pyside_rcc_command = os.path.join(
                    os.path.dirname(PySide.__file__),
                    'pyside-rcc.exe'
                )

            subprocess.check_call([
                pyside_rcc_command,
                '-o',
                self.resource_target_path,
                self.resource_source_path
            ])
        except (subprocess.CalledProcessError, OSError):
            print(
                'Error compiling resource.py using pyside-rcc. Possibly '
                'pyside-rcc could not be found. You might need to manually add '
                'it to your PATH.'
            )
            raise SystemExit()
class Build(BuildCommand):
    '''Custom build to pre-build resources.'''

    def run(self):
        '''Run build ensuring build_resources called first.'''
        self.run_command('build_resources')
        BuildCommand.run(self)
class Install(InstallCommand):
    '''Custom install to pre-build resources.'''

    def do_egg_install(self):
        '''Run install ensuring build_resources called first.

        .. note::

            `do_egg_install` used rather than `run` as sometimes `run` is not
            called at all by setuptools.

        '''
        self.run_command('build_resources')
        InstallCommand.do_egg_install(self)
class Clean(CleanCommand):
    '''Custom clean to remove built resources and distributions.'''

    def run(self):
        '''Run clean.

        Removes the generated resource module and its compiled byte-code
        before delegating to the standard clean command.
        '''
        relative_resource_path = os.path.relpath(
            RESOURCE_TARGET_PATH, ROOT_PATH
        )

        # Remove both the generated .py module and its .pyc sibling, warning
        # about any that is already absent.
        for candidate in (relative_resource_path, relative_resource_path + 'c'):
            if os.path.exists(candidate):
                os.remove(candidate)
            else:
                distutils.log.warn(
                    '\'{0}\' does not exist -- can\'t clean it'
                    .format(candidate)
                )

        CleanCommand.run(self)
class PyTest(TestCommand):
    '''Pytest command.'''

    def finalize_options(self):
        '''Prepare arguments for the in-process pytest run.'''
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        '''Import pytest and run.'''
        # Deferred import so setup.py works without pytest installed unless
        # the test command is actually invoked.
        import pytest
        errno = pytest.main(self.test_args)
        raise SystemExit(errno)
# Extract the version string from riffle/_version.py without importing it.
with open(os.path.join(SOURCE_PATH, 'riffle', '_version.py')) as _version_file:
    VERSION = re.match(
        r'.*__version__ = \'(.*?)\'',
        _version_file.read(),
        re.DOTALL
    ).group(1)


# Compute dependencies.
SETUP_REQUIRES = [
    'PySide >= 1.2.2, < 2',
    'sphinx >= 1.2.2, < 2',
    'sphinx_rtd_theme >= 0.1.6, < 1',
    'lowdown >= 0.1.0, < 2',
    'mock >=2, < 3'
]
INSTALL_REQUIRES = [
    'PySide >= 1.2.2, < 2',
    'clique >= 1.2.0, < 2'
]
TEST_REQUIRES = [
    'pytest >= 2.3.5, < 3'
]

# Readthedocs requires Sphinx extensions to be specified as part of
# install_requires in order to build properly.
if ON_READ_THE_DOCS:
    INSTALL_REQUIRES.extend(SETUP_REQUIRES)

    # PySide not available.
    SETUP_REQUIRES = [
        requirement for requirement in SETUP_REQUIRES
        if not requirement.startswith("PySide ")
    ]
    INSTALL_REQUIRES = [
        requirement for requirement in INSTALL_REQUIRES
        if not requirement.startswith("PySide ")
    ]


setup(
    name='Riffle',
    version=VERSION,
    description='Filesystem browser for PySide.',
    long_description=open(README_PATH).read(),
    keywords='filesystem, browser, pyside, qt, pyqt',
    url='https://gitlab.com/4degrees/riffle',
    author='Martin Pengelly-Phillips',
    author_email='martin@4degrees.ltd.uk',
    license='Apache License (2.0)',
    packages=find_packages(SOURCE_PATH),
    package_dir={
        '': 'source'
    },
    setup_requires=SETUP_REQUIRES,
    install_requires=INSTALL_REQUIRES,
    tests_require=TEST_REQUIRES,
    extras_require={
        'setup': SETUP_REQUIRES,
        'tests': TEST_REQUIRES,
        'dev': SETUP_REQUIRES + TEST_REQUIRES
    },
    cmdclass={
        'build': Build,
        'build_resources': BuildResources,
        'install': Install,
        'clean': Clean,
        'test': PyTest
    }
)
| {
"content_hash": "cea34ae7325a096226a06855b1b6e8bc",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 80,
"avg_line_length": 27.307339449541285,
"alnum_prop": 0.5948261380816395,
"repo_name": "4degrees/riffle",
"id": "917cef98c5b350c9f71eb31c8292d1833a5120fc",
"size": "6058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "33026"
}
],
"symlink_target": ""
} |
"""
Helper module for ELMR version information
"""
##########################################################################
## Versioning
##########################################################################
__version_info__ = {
    'major': 0,
    'minor': 4,
    'micro': 5,
    'releaselevel': 'final',
    'serial': 0,
}


def get_version(short=False):
    """
    Return the version string assembled from ``__version_info__``.

    The result is "major.minor", with ".micro" appended when micro is
    non-zero.  For pre-releases (releaselevel 'alpha' or 'beta') the first
    letter of the level plus the serial number is appended, unless *short*
    is True.  Raises AssertionError on an unknown release level.
    """
    info = __version_info__
    assert info['releaselevel'] in ('alpha', 'beta', 'final')

    version = "%(major)i.%(minor)i" % info
    if info['micro']:
        version += ".%(micro)i" % info
    if not short and info['releaselevel'] != 'final':
        version += '%s%i' % (info['releaselevel'][0], info['serial'])
    return version
| {
"content_hash": "401836139edba51468f5bd782f3bfdb6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 29.75862068965517,
"alnum_prop": 0.44380069524913096,
"repo_name": "bbengfort/jobs-report",
"id": "db8bfc93f03ab9622fb6b83b0de3625587105f75",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elmr/version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "82716"
},
{
"name": "HTML",
"bytes": "45222"
},
{
"name": "JavaScript",
"bytes": "32869"
},
{
"name": "Makefile",
"bytes": "818"
},
{
"name": "PLpgSQL",
"bytes": "324502"
},
{
"name": "Python",
"bytes": "121814"
}
],
"symlink_target": ""
} |
"""This is part of [pyspaces](https://github.com/Friz-zy/pyspaces)
License: MIT or BSD or Apache 2.0
Copyright (c) 2014 Filipp Kucheryavy aka Frizzy <filipp.s.frizzy@gmail.com>
"""
import os
import sys
import errno
from .libc import *
from signal import SIGKILL, SIGCHLD
try:
from multiprocessing.forking import Popen
except ImportError:
from multiprocessing.popen_fork import Popen
# Size in bytes of the stack buffer allocated for each cloned child (1 MiB).
# The pointer passed to clone() is the TOP of this buffer (stacks grow down).
STACK_SIZE = 1024 * 1024
"""STACK_SIZE (1024 * 1024)"""
# cloning flags, mirrored verbatim from the kernel headers
# src: linux/include/uapi/linux/sched.h
CLONE_VM = 0x00000100
"""set if VM shared between processes"""
CLONE_FS = 0x00000200
"""set if fs info shared between processes"""
CLONE_FILES = 0x00000400
"""set if open files shared between processes"""
CLONE_SIGHAND = 0x00000800
"""set if signal handlers and blocked signals shared"""
CLONE_PTRACE = 0x00002000
"""set if we want to let tracing continue on the child too"""
CLONE_VFORK = 0x00004000
"""set if the parent wants the child to wake it up on mm_release"""
CLONE_PARENT = 0x00008000
"""set if we want to have the same parent as the cloner"""
CLONE_THREAD = 0x00010000
"""Same thread group?"""
CLONE_NEWNS = 0x00020000
"""New mount namespace group"""
CLONE_SYSVSEM = 0x00040000
"""share system V SEM_UNDO semantics"""
CLONE_SETTLS = 0x00080000
"""create a new TLS for the child"""
CLONE_PARENT_SETTID = 0x00100000
"""set the TID in the parent"""
CLONE_CHILD_CLEARTID = 0x00200000
"""clear the TID in the child"""
CLONE_DETACHED = 0x00400000
"""Unused, ignored"""
CLONE_UNTRACED = 0x00800000
"""set if the tracing process can't force CLONE_PTRACE on this clone"""
CLONE_CHILD_SETTID = 0x01000000
"""set the TID in the child"""
CLONE_NEWUTS = 0x04000000
"""New utsname namespace"""
CLONE_NEWIPC = 0x08000000
"""New ipc namespace"""
CLONE_NEWUSER = 0x10000000
"""New user namespace"""
CLONE_NEWPID = 0x20000000
"""New pid namespace"""
CLONE_NEWNET = 0x40000000
"""New network namespace"""
CLONE_IO = 0x80000000
"""Clone io context"""
class Clone(Popen):
    """Inheritance from `multiprocessing.forking.Popen`.

    We define a Popen class similar to the one from subprocess, but
    whose constructor takes a process object as its argument.  The child
    is created via glibc ``clone()`` so that new Linux namespaces and
    UID/GID mappings can be requested through attributes on the process
    object (``clone_flags``, ``uid_map``, ``gid_map``, ``map_zero``,
    ``proc``).

    Raises:
        OSError: can not execute glibc.clone function
        RuntimeError: parent doesn't close its pipe descriptor
        IOError: do not have permission to write to a file.
        Child will be killed with signal.SIGKILL.
    """
    def __init__(self, process_obj):
        """Execute linux clone.

        Create a child process in new namespace(s);
        allow UID and GID mappings to be specified when
        creating a user namespace.

        Raises:
            OSError: can not execute glibc.clone function
        """
        # Flush both streams so buffered output is not duplicated into
        # the child after the clone.
        sys.stdout.flush()
        sys.stderr.flush()
        self.process_obj = process_obj
        self.returncode = None
        # Pipe used purely for synchronisation: the child blocks reading
        # it until the parent has written the uid/gid maps and closed
        # the write end.
        self.pipe_fd = os.pipe()
        # clone attributes (read straight off the process object's dict,
        # so absent attributes simply fall back to the defaults here)
        flags = process_obj.__dict__.get('clone_flags', 0)
        uid_map = process_obj.__dict__.get('uid_map', "")
        gid_map = process_obj.__dict__.get('gid_map', "")
        map_zero = process_obj.__dict__.get('map_zero', False)
        proc = process_obj.__dict__.get('proc', '/proc')
        # Create the child in new namespace(s).  The callback must be
        # kept in a local so it is referenced while clone() runs; the
        # stack pointer is the END of the buffer because the stack grows
        # downward on Linux.
        child = CFUNCTYPE(c_int)(self.child)
        child_stack = create_string_buffer(STACK_SIZE)
        child_stack_pointer = c_void_p(cast(child_stack, c_void_p).value + STACK_SIZE)
        # SIGCHLD in the flags makes the kernel signal us on child exit,
        # which waitpid-based reaping in Popen relies on.
        self.pid = libc.clone(child, child_stack_pointer, flags | SIGCHLD)
        if self.pid == -1:
            e = get_errno()
            raise OSError(e, os.strerror(e))
        # Update the UID and GID maps in the child
        def arg2map(arg):
            # Normalise the user-supplied mapping into the text format
            # expected by /proc/[pid]/uid_map (newline-separated records).
            # int: map given uid to root
            # str: like int or in format
            # ' '.join((<start uid in new ns>,
            # <start uid in current ns>,
            # <range to mapping>
            # )). Example "0 1000 1" will map 1000 uid as root,
            # "0 1000 1,1 1001 1" will map also 1001 as uid 1.
            # list: list of int or str
            if type(arg) is int:
                return "0 %d 1" % arg
            if not hasattr(arg, '__iter__'):
                arg = arg.split(',')
            if ' ' not in arg[0]:
                arg = ['%d %s 1' % (i, d) for i, d in enumerate(arg)]
            return '\n'.join(arg)
        if uid_map or map_zero:
            map_path = "%s/%s/uid_map" % (proc, self.pid)
            if map_zero or type(uid_map) is bool:
                # Map the current user to root inside the new namespace.
                uid_map = "0 %d 1" % os.getuid()
            else:
                uid_map = arg2map(uid_map)
            self.update_map(uid_map, map_path)
        if gid_map or map_zero:
            # Writing "deny" to the /proc/[pid]/setgroups file before writing to
            # /proc/[pid]/gid_map will permanently disable setgroups(2) in a user
            # namespace and allow writing to /proc/[pid]/gid_map without having the
            # CAP_SETGID capability in the parent user namespace.
            with open("%s/%s/setgroups" % (proc, self.pid), 'w') as f:
                f.write("deny")
            map_path = "%s/%s/gid_map" % (proc, self.pid)
            if map_zero or type(gid_map) is bool:
                gid_map = "0 %d 1" % os.getgid()
            else:
                gid_map = arg2map(gid_map)
            self.update_map(gid_map, map_path)
        # Close the write end of the pipe, to signal to the child that we
        # have updated the UID and GID maps
        os.close(self.pipe_fd[1])
        self.sentinel = self.pipe_fd[0]
    def child(self):
        """Start function for cloned child.

        Wait until the parent has updated the UID and GID mappings.
        See the comment in main(). We wait for end of file on a
        pipe that will be closed by the parent process once it has
        updated the mappings.

        Raises:
            RuntimeError: parent doesn't close its pipe descriptor
        """
        # Close our descriptor for the write
        # end of the pipe so that we see EOF
        # when parent closes its descriptor
        os.close(self.pipe_fd[1])
        # A successful read of any data (instead of EOF) means the parent
        # misbehaved and never closed its write end.
        if os.read(self.pipe_fd[0], 1):
            raise RuntimeError(
                'Failure in child:'
                ' parent doesn\'t close its descriptor'
            )
        # Re-seed the RNG so the child does not share the parent's
        # random state (only if the module was already imported).
        if 'random' in sys.modules:
            import random
            random.seed()
        # Run the multiprocessing bootstrap (target function) and exit
        # without unwinding the parent's interpreter state.
        code = self.process_obj._bootstrap()
        sys.stdout.flush()
        sys.stderr.flush()
        os._exit(code)
    def update_map(self, mapping, map_file):
        """
        Update the mapping file 'map_file', with the value provided in
        'mapping', a string that defines a UID or GID mapping. A UID or
        GID mapping consists of one or more newline-delimited records
        of the form:
            ID_inside-ns ID-outside-ns length
        Requiring the user to supply a string that contains newlines is
        of course inconvenient for command-line use. Thus, we permit the
        use of commas to delimit records in this string, and replace them
        with newlines before writing the string to the file.

        Raises:
            IOError: do not have permission to write to a file.
                Child will be killed with signal.SIGKILL.
        """
        #Replace commas in mapping string with newlines
        mapping = mapping.replace(',', '\n')
        try:
            with open(map_file, 'w') as f:
                f.write(mapping)
        except IOError as e:
            # The child cannot run safely with an incomplete mapping, so
            # kill it before propagating the failure.
            os.kill(self.pid, SIGKILL)
            raise IOError(
                "Can not write %s: %s\nAborting!" % (map_file, e)
            )
| {
"content_hash": "2edc6997485d7f342746ca1a2a227cf2",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 86,
"avg_line_length": 34.284444444444446,
"alnum_prop": 0.5961887477313974,
"repo_name": "Friz-zy/pyspaces",
"id": "2e30a78e0eb7c4a2e9d1958b1521c4d11510eb2b",
"size": "7751",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyspaces/cloning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48550"
}
],
"symlink_target": ""
} |
__author__ = 'tmkasun'
from pyspark.context import SparkContext, SparkConf
from pyspark.mllib.clustering import KMeans
from matplotlib import pyplot
from conf.configurations import project
from libs.analyser import SparkAnalyser
def main():
    """Cluster vehicle records with KMeans and plot the centroids."""
    # Setup Spark context by setting application name and running mode, `localhost` is a special string here
    conf = SparkConf().setAppName("Vehicles_KMean_clustering").setMaster('local')
    sc = SparkContext(conf=conf)
    analyser = SparkAnalyser(sc)
    raw_rdd = analyser.load_data(project['data_file'])
    # TODO: Only for debug purpose
    # init_data_count = raw_rdd.count()
    # print("INFO: Number of records in the data set {}".format(init_data_count))
    records_rdd = raw_rdd.map(analyser.data_extractor)
    # Drop rows that are not electric vehicles or have no cost information.
    electric_rdd = records_rdd.filter(analyser.electric_vehicles_filter)
    priced_rdd = electric_rdd.filter(analyser.empty_cost_filter)
    # Reshape the remaining rows into the feature vectors KMeans expects.
    costed_rdd = priced_rdd.map(analyser.cost_transform_mapper)
    features_rdd = costed_rdd.map(analyser.feature_mapper)
    # TODO: Only for debug purpose
    debug_template = "INFO: Filtered Data count = {} Rejected count = {}\nSample line `{}`"
    surviving_count = costed_rdd.count()
    # rejected_data = init_data_count - surviving_count
    # print(debug_template.format(surviving_count, rejected_data))
    num_iterations = 10
    num_runs = 10
    k = 5
    model = KMeans.train(features_rdd, k, num_iterations, num_runs)
    wssse = model.computeCost(features_rdd)
    print("INFO: Within Set Sum of Squared Error = {}".format(wssse))
    centroids = model.clusterCenters
    model.save(sc, "identified_clusters")
    for centroid in centroids:
        pyplot.scatter(centroid[0], centroid[1])
    pyplot.title("Cluster Centroids")
    pyplot.xlabel("Europe Label")
    pyplot.ylabel("Feature Normalized")
    pyplot.show()
if __name__ == '__main__':
main() | {
"content_hash": "4dd0a816e059f89ce12d022021c15539",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 108,
"avg_line_length": 34.75806451612903,
"alnum_prop": 0.7118329466357308,
"repo_name": "tmkasun/bigdata_spark",
"id": "692cb25b55512c8b856663b07725c5523ad87922",
"size": "2155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cluster_records.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15032"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Library version as a (major, minor, patch) tuple.
version = (0, 3, 10)
from .client import VimeoClient
from . import exceptions
| {
"content_hash": "335332270b8d374a11d984bf677f0343",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 19.833333333333332,
"alnum_prop": 0.7394957983193278,
"repo_name": "gabrielgisoldo/vimeo.py",
"id": "08d5bb86ba2abbd1570b6bd60c9e1a2415412118",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimeo/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20364"
}
],
"symlink_target": ""
} |
import os
import unittest
import numpy
import time
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 64
def convolutional_neural_network(use_py_reader):
    """Build a LeNet-style MNIST classifier in the current default program.

    Args:
        use_py_reader: when True, also create a non-iterable DataLoader
            feeding ``img``/``label``; otherwise py_reader is None.

    Returns:
        (array, img, label, prediction, avg_loss, acc, py_reader) — the
        LoDTensorArray holding [prediction, acc], the input variables, and
        the loss/accuracy ops.  Note: fluid layer calls append ops to the
        global default program, so call this inside a program/scope guard.
    """
    # unique_name.guard keeps auto-generated parameter names deterministic
    # across repeated network constructions.
    with fluid.unique_name.guard():
        img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        py_reader = None
        if use_py_reader:
            py_reader = fluid.io.DataLoader.from_generator(
                capacity=64,
                feed_list=[img, label],
                iterable=False,
                use_double_buffer=False,
            )
        # Two conv+pool stages followed by a softmax classifier.
        conv_pool_1 = fluid.nets.simple_img_conv_pool(
            input=img,
            filter_size=5,
            num_filters=20,
            pool_size=2,
            pool_stride=2,
            act="relu",
        )
        conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
        conv_pool_2 = fluid.nets.simple_img_conv_pool(
            input=conv_pool_1,
            filter_size=5,
            num_filters=50,
            pool_size=2,
            pool_stride=2,
            act="relu",
        )
        prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax')
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        avg_loss = paddle.mean(loss)
        acc = fluid.layers.accuracy(input=prediction, label=label)
        # Stash prediction (index 0) and acc (index 1) in a tensor array so
        # the caller can verify fetched array slots match the fetched vars.
        i = fluid.layers.zeros(shape=[1], dtype='int64')
        array = fluid.layers.array_write(x=prediction, i=i)
        fluid.layers.increment(i)
        fluid.layers.array_write(x=acc, i=i, array=array)
        return array, img, label, prediction, avg_loss, acc, py_reader
def test():
    """Evaluate a freshly-built MNIST network on the test split (CPU).

    Builds the network in the caller's current default program (callers
    wrap this in a program_guard) and asserts mean accuracy > 0.96 —
    presumably relying on parameters persisted by a prior train() run in
    the surrounding scope; confirm against the test class below.
    """
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    test_reader = paddle.batch(
        paddle.dataset.mnist.test(), batch_size=BATCH_SIZE
    )
    (
        array,
        img,
        label,
        prediction,
        avg_loss,
        acc,
        py_reader,
    ) = convolutional_neural_network(use_py_reader=False)
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
    def train_test(train_test_program, train_test_feed, train_test_reader):
        # Run the whole reader once, collecting per-batch acc/loss.
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(
                program=train_test_program,
                feed=train_test_feed.feed(test_data),
                fetch_list=[acc, avg_loss],
            )
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # get test acc and loss
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        return avg_loss_val_mean, acc_val_mean
    # test for epoch
    avg_loss_val, acc_val = train_test(
        train_test_program=fluid.default_main_program(),
        train_test_reader=test_reader,
        train_test_feed=feeder,
    )
    print("Test: avg_cost: %s, acc: %s" % (avg_loss_val, acc_val))
    assert acc_val > 0.96
def train(use_cuda, thread_num, cpu_num):
    """Train the MNIST network for 2 passes with the async SSA executor.

    Args:
        use_cuda: bail out early if True but paddle lacks CUDA support
            (the executor below always runs on CPU regardless).
        thread_num: ExecutionStrategy.num_threads for ParallelExecutor.
        cpu_num: exported as CPU_NUM so fluid replicates across devices.

    Returns:
        The step counter from the last pass (None on early CUDA bail-out).
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        print("paddle is not compiled with cuda, exit!")
        return
    (
        array,
        img,
        label,
        prediction,
        avg_loss,
        acc,
        py_reader,
    ) = convolutional_neural_network(use_py_reader=True)
    print("build convolutional neural network done.")
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_loss)
    print("Adam optimizer minimize done.")
    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE,
    )
    print("declared train reader done.")
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    print("going to run startup program")
    exe.run(fluid.default_startup_program())
    print("run startup program done.")
    # CPU_NUM controls how many CPU "devices" ParallelExecutor replicates
    # the program onto; must be set before the executor is constructed.
    os.environ['CPU_NUM'] = str(cpu_num)
    print("cpu_num:" + str(cpu_num))
    print("thread_num:" + str(thread_num))
    # async_mode enables the async SSA graph executor under test.
    build_strategy = fluid.BuildStrategy()
    build_strategy.async_mode = True
    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = thread_num
    exec_strategy.num_iteration_per_run = 10
    main_program = fluid.default_main_program()
    pe = fluid.ParallelExecutor(
        use_cuda=False,
        loss_name=avg_loss.name,
        main_program=main_program,
        build_strategy=build_strategy,
        exec_strategy=exec_strategy,
    )
    print("declare parallel executor done.")
    py_reader.set_sample_list_generator(train_reader)
    for pass_id in range(2):
        step = 0
        py_reader.start()
        try:
            while True:
                array_v, acc_v, prediction_v, loss_val = pe.run(
                    fetch_list=[array, acc, prediction, avg_loss.name]
                )
                # The tensor array built in the network must mirror the
                # individually fetched prediction/acc values.
                assert numpy.allclose(array_v[0], prediction_v)
                assert numpy.allclose(array_v[1], acc_v)
                loss_val = numpy.mean(loss_val)
                if step % 10 == 0:
                    print(
                        "Pass %d, Batch %d, Cost %f, queue size %d"
                        % (pass_id, step, loss_val, py_reader.queue.size())
                    )
                step += 1
        # EOFException is the normal end-of-data signal for a non-iterable
        # DataLoader; reset() rearms it for the next pass.
        except fluid.core.EOFException:
            print("train end pass = " + str(pass_id))
            py_reader.reset()
    return step
class TestAsyncSSAGraphExecutor(unittest.TestCase):
    """Checks async-mode ParallelExecutor training scales with CPU count."""

    def test_check_async_ssa_exe_train(self):
        step_list = []
        for cpu_num in [1, 2, 4]:
            print("run cpu_num -> " + str(cpu_num))
            # Fresh scope + fresh programs per run so each training is
            # isolated from the previous one's parameters and graph.
            with fluid.scope_guard(fluid.core.Scope()):
                with fluid.program_guard(
                    main_program=fluid.Program(),
                    startup_program=fluid.Program(),
                ):
                    start_time = time.time()
                    step = train(
                        use_cuda=False, thread_num=cpu_num, cpu_num=cpu_num
                    )
                    end_time = time.time()
                step_list.append(step)
                print(
                    "cpu_num -> "
                    + str(cpu_num)
                    + " step -> "
                    + str(step)
                    + " time -> "
                    + str(end_time - start_time)
                )
            # Separate programs for evaluation of the trained parameters.
            with fluid.program_guard(
                main_program=fluid.Program(),
                startup_program=fluid.Program(),
            ):
                test()
        # Doubling the device count should roughly halve the per-device
        # step count (each run consumes the same dataset).
        assert abs(int(step_list[0] / 2) - int(step_list[1])) < 5
        assert abs(int(step_list[1] / 2) - int(step_list[2])) < 5
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| {
"content_hash": "be5508326394a56c8d16b93481ffd8b8",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 79,
"avg_line_length": 31.29953917050691,
"alnum_prop": 0.5385747938751473,
"repo_name": "luotao1/Paddle",
"id": "41fc17187093ce6f63d61981173e9a9636f8d721",
"size": "7405",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
"""
auto save window geometry
Tested environment:
Mac OS X 10.6.8
http://doc.qt.nokia.com/latest/qdesktopwidget.html
http://www.pyside.org/docs/pyside/PySide/QtGui/QWidget.html
"""
import json
import os
import sys
import web
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
# Application name; used as the directory name for persisted settings.
app_name = "foo"
#tmp_path = os.getenv("TMP") or "/tmp"
# Store the geometry file next to this script instead of a temp dir so
# saved window placement survives temp-dir cleanup.
PWD = os.path.dirname(os.path.realpath(__file__))
tmp_path = PWD
app_data_path = os.path.join(tmp_path, app_name)
class AutoSaveGeo(QtGui.QWidget):
    """Widget that persists its window geometry across runs.

    On close, the current x/y/width/height are saved into
    ``<user_data_path>/win_geometry.json`` keyed by the concrete class
    name; on construction the saved geometry (if any) is restored.
    When no saved geometry exists the window is placed at mid-screen
    horizontally and centered vertically.
    """

    def __init__(self, w=300, h=500, parent=None, user_data_path=None):
        """Create the widget; pass user_data_path to enable persistence."""
        super(AutoSaveGeo, self).__init__(parent)
        self.resize(w, h)
        self.user_data_path = user_data_path
        if self.user_data_path:
            self._load_win_geo()

    def closeEvent(self, evt):
        """Save geometry (if enabled) before the normal close handling."""
        # hasattr guard: closeEvent could fire before __init__ has set
        # the attribute on a subclass with a non-trivial constructor.
        if hasattr(self, "user_data_path") and self.user_data_path:
            self._save_win_geo()
        return super(AutoSaveGeo, self).closeEvent(evt)

    def _config_path(self):
        """Return the geometry file path, creating the data dir if needed."""
        if not os.path.exists(self.user_data_path):
            os.makedirs(self.user_data_path)
        return os.path.join(self.user_data_path, "win_geometry.json")

    def _read_saved_geometries(self, config_path):
        """Return the saved {class_name: geometry dict} mapping, or {}."""
        if os.path.exists(config_path):
            # `open` (not the Python-2-only `file` builtin) in a context
            # manager so the handle is closed even on read errors.
            with open(config_path) as f:
                buf = f.read()
            if buf:
                return json.loads(buf) or {}
        return {}

    def _save_win_geo(self):
        """Merge this window's geometry into the JSON file and write it."""
        config_path = self._config_path()
        datas = self._read_saved_geometries(config_path)
        datas[self.__class__.__name__] = dict(
            x=self.x(),
            y=self.y(),
            w=self.width(),
            h=self.height())
        web.utils.safewrite(config_path, json.dumps(datas))

    def _load_win_geo(self):
        """Apply the saved geometry, or a centered default placement."""
        config_path = self._config_path()
        desktop = QtGui.QApplication.desktop()
        # `//` keeps the coordinates integral under Python 3 (true
        # division would hand floats to setGeometry); identical result
        # to the historical int `/` behaviour on Python 2.
        x = desktop.width() // 2
        y = (desktop.height() - self.height()) // 2
        w = self.width()
        h = self.height()
        geo = self._read_saved_geometries(config_path).get(
            self.__class__.__name__)
        if geo:
            x, y, w, h = geo['x'], geo['y'], geo['w'], geo['h']
        self.setGeometry(x, y, w, h)
class Demo(AutoSaveGeo):
    """Minimal window demonstrating geometry auto-save/restore."""

    def __init__(self, parent=None, user_data_path=None):
        super(Demo, self).__init__(
            parent=parent, user_data_path=user_data_path)

    def show_and_raise(self):
        """Display the window and bring it to the front of the stack."""
        self.show()
        self.raise_()
if __name__ == "__main__":
    # Manual demo: run, move/resize the window, close, then run again to
    # see the geometry restored from win_geometry.json next to this file.
    app = QtGui.QApplication(sys.argv)
    demo = Demo(user_data_path = app_data_path)
    demo.show_and_raise()
    sys.exit(app.exec_())
| {
"content_hash": "f11feaa69a8774878836de6aab6f27c1",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 84,
"avg_line_length": 24.031007751937985,
"alnum_prop": 0.5441935483870968,
"repo_name": "alexlib/Qt-Python-Binding-Examples",
"id": "3f04f6013ca3dcfc4856dee24db305cc7dcdcde8",
"size": "3122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windows/auto_save_win_geometry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "251904"
}
],
"symlink_target": ""
} |
"""Date range parser."""
import datetime
import dateutil.parser
import re
import operator
__all__ = ('date_in_range',)
CLAUSE = re.compile('(year|month|day|hour|minute|second|weekday|isoweekday)?'
                    ' ?(==|!=|<=|>=|<|>) ?(.*)')
OPERATORS = {
    '==': operator.eq,
    '!=': operator.ne,
    '<=': operator.le,
    '>=': operator.ge,
    '<': operator.lt,
    '>': operator.gt,
}
def date_in_range(date_range, date, debug=False, now=None):
    """Return True when *date* satisfies every clause in *date_range*.

    *date_range* is a comma-separated AND-list of clauses of the form
    ``[attribute] operator value``: the attribute is one of year, month,
    day, hour, minute, second, weekday, isoweekday (or omitted to compare
    the full datetime); the operator is one of == != <= >= < >; the value
    is an integer, ``now``, ``today``, or anything dateutil can parse.
    The optional *now* supplies a fixed reference for ``now``/``today``
    instead of the current clock.
    """
    verdict = True
    for raw_clause in date_range.split(','):
        attribute, comparison_operator, value = CLAUSE.match(
            raw_clause.strip()).groups()
        if attribute in ('weekday', 'isoweekday'):
            # These two are methods on date/datetime, not attributes.
            lhs, rhs = getattr(date, attribute)(), int(value)
        elif value == 'now':
            lhs, rhs = date, now or datetime.datetime.now()
        elif value == 'today':
            # Normalise both sides down to plain dates for the comparison.
            lhs = date.date() if isinstance(date, datetime.datetime) else date
            if now:
                rhs = now.date() if isinstance(now, datetime.datetime) else now
            else:
                rhs = datetime.date.today()
        elif attribute:
            lhs, rhs = getattr(date, attribute), int(value)
        else:
            # Bare comparison against a dateutil-parsed timestamp.
            lhs, rhs = date, dateutil.parser.parse(value)
        if debug:  # pragma: no cover
            print(" <{0} {1} {2}>".format(lhs, comparison_operator, rhs))
        verdict = verdict and OPERATORS[comparison_operator](lhs, rhs)
    return verdict
| {
"content_hash": "948f9869746824f81d7fec0f60bad5a7",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 86,
"avg_line_length": 33.74193548387097,
"alnum_prop": 0.5697896749521989,
"repo_name": "getnikola/nikola",
"id": "4e9566c09cd6166abc62037a8711d6186c538293",
"size": "3749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/packages/datecond/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34036"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "2076"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1299776"
},
{
"name": "Shell",
"bytes": "9704"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
} |
class GlobalConfig(object):
    """Application-wide display and behaviour settings."""

    def __init__(self):
        # Keep prompting in verbose mode until the user explicitly quits.
        self.prompt_until_exit = True
        # Display cap; a future version will accept this as a CLI argument.
        self.article_limit = 10
        # Terminal colours for each rendered part of a story.
        self.headline_color = 'cyan'
        self.source_color = 'magenta'
        self.section_color = 'red'
        self.abstract_color = 'yellow'
        self.prompt_color = 'green'
class NewsFeedConfig(object):
    """Per-feed query settings."""

    def __init__(self):
        # Articles requested per query; most feeds cap this at 20,
        # although some return more.
        self.limit = 20
| {
"content_hash": "9bfdc695bfaf85ad931dce5f59112115",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 136,
"avg_line_length": 44.3125,
"alnum_prop": 0.6629055007052186,
"repo_name": "haaspt/whatsnew",
"id": "3dac073d9bbfe60fd4eda6b1c51fd2c8598678d1",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7949"
}
],
"symlink_target": ""
} |
from docfx_yaml import extension
import unittest
from parameterized import parameterized
from yaml import load, Loader
class TestGenerate(unittest.TestCase):
def test_find_unique_name(self):
entries = {}
# Disambiguate with unique entries.
entry1 = "google.cloud.aiplatform.v1.schema.predict.instance_v1.types"
entry2 = "google.cloud.aiplatform.v1beta2.schema.predict.instance_v1.types"
want1 = "v1.types"
want2 = "v1beta2.types"
for entry in [entry1, entry2]:
for word in entry.split("."):
if word not in entries:
entries[word] = 1
else:
entries[word] += 1
got1 = extension.find_unique_name(entry1.split("."), entries)
got2 = extension.find_unique_name(entry2.split("."), entries)
self.assertEqual(want1, ".".join(got1))
self.assertEqual(want2, ".".join(got2))
def test_disambiguate_toc_name(self):
with open('tests/yaml_post.yaml', 'r') as want_file:
yaml_want = load(want_file, Loader=Loader)
disambiguated_names_want = {
'google.cloud.spanner_admin_database_v1.types': 'spanner_admin_database_v1.types',
'google.cloud.spanner_admin_instance_v1.types': 'spanner_admin_instance_v1.types',
'google.cloud.spanner_v1.types': 'spanner_v1.types'
}
with open('tests/yaml_pre.yaml', 'r') as test_file:
yaml_got = load(test_file, Loader=Loader)
disambiguated_names_got = extension.disambiguate_toc_name(yaml_got)
self.assertEqual(yaml_want, yaml_got)
self.assertEqual(disambiguated_names_want, disambiguated_names_got)
def test_disambiguate_toc_name_duplicate(self):
with open('tests/yaml_post_duplicate.yaml', 'r') as want_file:
yaml_want = load(want_file, Loader=Loader)
disambiguated_names_want = {
'google.api_core.client_info': 'client_info',
'google.api_core.gapic_v1.client_info': 'gapic_v1.client_info'
}
with open('tests/yaml_pre_duplicate.yaml', 'r') as test_file:
yaml_got = load(test_file, Loader=Loader)
disambiguated_names_got = extension.disambiguate_toc_name(yaml_got)
self.assertEqual(yaml_want, yaml_got)
self.assertEqual(disambiguated_names_want, disambiguated_names_got)
    def test_reference_in_summary(self):
        """Sphinx-style references in a docstring become <xref> tags.

        Runs both reference regexes over a realistic docstring and checks
        the rewritten lines plus the collected set of xref uids.
        """
        lines_got = """
If a ``stream`` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
    transport (~google.cloud.requests.Session): A ``requests`` object which can
        make authenticated requests.
    timeout (Optional[Union[float, Tuple[float, float]]]):
        The number of seconds to wait for the server response.
        Depending on the retry strategy, a request may be repeated
        several times using the same timeout each time.
        Can also be passed as a tuple (connect_timeout, read_timeout).
        See :meth:`google.cloud.requests.Session.request` documentation for details.
Returns:
    ~google.cloud.requests.Response: The HTTP response returned by ``transport``.
Raises:
    ~google.cloud.resumable_media.common.DataCorruption: If the download's
        checksum doesn't agree with server-computed checksum.
    ValueError: If the current :class:`Download` has already
        finished.
"""
        lines_got = lines_got.split("\n")
        xrefs_got = []
        # Resolve over different regular expressions for different types of reference patterns.
        lines_got, xrefs = extension._resolve_reference_in_module_summary(extension.REF_PATTERN, lines_got)
        for xref in xrefs:
            xrefs_got.append(xref)
        lines_got, xrefs = extension._resolve_reference_in_module_summary(extension.REF_PATTERN_LAST, lines_got)
        for xref in xrefs:
            xrefs_got.append(xref)
        # Expected output: google.cloud references become <xref> tags while
        # the unresolvable :class:`Download` degrades to plain backticks.
        lines_want = """
If a ``stream`` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
    transport (<xref uid="google.cloud.requests.Session">Session</xref>): A ``requests`` object which can
        make authenticated requests.
    timeout (Optional[Union[float, Tuple[float, float]]]):
        The number of seconds to wait for the server response.
        Depending on the retry strategy, a request may be repeated
        several times using the same timeout each time.
        Can also be passed as a tuple (connect_timeout, read_timeout).
        See <xref uid="google.cloud.requests.Session.request">request</xref> documentation for details.
Returns:
    <xref uid="google.cloud.requests.Response">Response</xref>: The HTTP response returned by ``transport``.
Raises:
    <xref uid="google.cloud.resumable_media.common.DataCorruption">DataCorruption</xref>: If the download's
        checksum doesn't agree with server-computed checksum.
    ValueError: If the current `Download` has already
        finished.
"""
        lines_want = lines_want.split("\n")
        xrefs_want = [
            "google.cloud.requests.Session",
            "google.cloud.requests.Session.request",
            "google.cloud.requests.Response",
            "google.cloud.resumable_media.common.DataCorruption"
        ]
        self.assertEqual(lines_got, lines_want)
        self.assertCountEqual(xrefs_got, xrefs_want)
        # assertCountEqual is a misleading name but checks that two lists contain
        # same items regardless of order, as long as items in list are sortable.
    # Test for added xref coverage and third party xrefs staying as-is
    def test_reference_in_summary_more_xrefs(self):
        """Call-style references resolve too; non-google refs stay literal."""
        lines_got = """
If a ~dateutil.time.stream() is attached to this download, then the downloaded
resource will be written to the stream.
Args:
    transport (~google.cloud.requests.Session()): A ``requests`` object which can
        make authenticated requests.
    timeout (Optional[Union[float, Tuple[float, float]]]):
        The number of seconds to wait for the server response.
        Depending on the retry strategy, a request may be repeated
        several times using the same timeout each time.
        Can also be passed as a :func:`~google.cloud.requests.tuple()` (connect_timeout, read_timeout).
        See :meth:`google.cloud.requests.Session.request()` documentation for details.
"""
        lines_got = lines_got.split("\n")
        xrefs_got = []
        # Resolve over different regular expressions for different types of reference patterns.
        lines_got, xrefs = extension._resolve_reference_in_module_summary(extension.REF_PATTERN, lines_got)
        for xref in xrefs:
            xrefs_got.append(xref)
        lines_got, xrefs = extension._resolve_reference_in_module_summary(extension.REF_PATTERN_LAST, lines_got)
        for xref in xrefs:
            xrefs_got.append(xref)
        # The third-party dateutil reference is kept as plain backticks,
        # never turned into an <xref>, and does not appear in xrefs_want.
        lines_want = """
If a `dateutil.time.stream()` is attached to this download, then the downloaded
resource will be written to the stream.
Args:
    transport (<xref uid="google.cloud.requests.Session">Session()</xref>): A ``requests`` object which can
        make authenticated requests.
    timeout (Optional[Union[float, Tuple[float, float]]]):
        The number of seconds to wait for the server response.
        Depending on the retry strategy, a request may be repeated
        several times using the same timeout each time.
        Can also be passed as a <xref uid="google.cloud.requests.tuple">tuple()</xref> (connect_timeout, read_timeout).
        See <xref uid="google.cloud.requests.Session.request">request()</xref> documentation for details.
"""
        lines_want = lines_want.split("\n")
        xrefs_want = [
            "google.cloud.requests.Session",
            "google.cloud.requests.tuple",
            "google.cloud.requests.Session.request"
        ]
        self.assertEqual(lines_got, lines_want)
        self.assertCountEqual(xrefs_got, xrefs_want)
        # assertCountEqual is a misleading name but checks that two lists contain
        # same items regardless of order, as long as items in list are sortable.
# Variables used for testing _extract_docstring_info
top_summary1_want = "\nSimple test for docstring.\n\n"
summary_info1_want = {
'variables': {
'arg1': {
'var_type': 'int',
'description': 'simple description.'
},
'arg2': {
'var_type': 'str',
'description': 'simple description for `arg2`.'
}
},
'returns': [
{
'var_type': 'str',
'description': 'simple description for return value.'
}
],
'exceptions': [
{
'var_type': 'AttributeError',
'description': 'if `condition x`.'
}
]
}
    # Test for resolving square bracketed references.
    def test_reference_square_brackets(self):
        """Proto-style [Name][full.id] references become <xref> tags."""
        xrefs_want = [
            'google.cloud.kms.v1.KeyRing.name',
            'google.cloud.kms.v1.KeyRing',
            'google.cloud.kms.v1.ImportJob',
        ]
        summary_want = """Required.
The <xref uid="google.cloud.kms.v1.KeyRing.name">name</xref> of the <xref uid="google.cloud.kms.v1.KeyRing">KeyRing</xref> associated with the <xref uid="google.cloud.kms.v1.ImportJob">ImportJobs</xref>.
"""
        summary_want = summary_want.split("\n")
        summary = """Required.
The [name][google.cloud.kms.v1.KeyRing.name] of the [KeyRing][google.cloud.kms.v1.KeyRing] associated with the [ImportJobs][google.cloud.kms.v1.ImportJob].
"""
        summary = summary.split("\n")
        summary_got, xrefs_got = extension._resolve_reference_in_module_summary(extension.REF_PATTERN_BRACKETS, summary)
        self.assertEqual(summary_got, summary_want)
        self.assertCountEqual(xrefs_got, xrefs_want)
# Check that other patterns throws an exception.
def test_reference_check_error(self):
    """An unsupported reference pattern must raise ValueError."""
    bad_lines = 'not a valid ref line'.split('\n')
    with self.assertRaises(ValueError):
        extension._resolve_reference_in_module_summary('.*', bad_lines)
def test_extract_docstring_info_normal_input(self):
    ## Test for normal input
    """Google-style Args/Returns/Raises sections are parsed into summary info."""
    # Accumulator that _extract_docstring_info fills in place.
    summary_info1_got = {
        'variables': {},
        'returns': [],
        'exceptions': []
    }
    summary1 = """
Simple test for docstring.
Args:
arg1(int): simple description.
arg2(str): simple description for `arg2`.
Returns:
str: simple description for return value.
Raises:
AttributeError: if `condition x`.
"""
    top_summary1_got = extension._extract_docstring_info(summary_info1_got, summary1, "")
    self.assertEqual(top_summary1_got, self.top_summary1_want)
    self.assertEqual(summary_info1_got, self.summary_info1_want)
def test_extract_docstring_info_mixed_format(self):
    ## Test for input coming in mixed format.
    """reST-style fields (:param:/:type:/:rtype:/:raises:) parse identically."""
    summary2 = """
Simple test for docstring.
:type arg1: int
:param arg1: simple description.
:param arg2: simple description for `arg2`.
:type arg2: str
:rtype: str
:returns: simple description for return value.
:raises AttributeError: if `condition x`.
"""
    summary_info2_got = {
        'variables': {},
        'returns': [],
        'exceptions': []
    }
    top_summary2_got = extension._extract_docstring_info(summary_info2_got, summary2, "")
    # Output should be same as test 1 with normal input.
    self.assertEqual(top_summary2_got, self.top_summary1_want)
    self.assertEqual(summary_info2_got, self.summary_info1_want)
def test_extract_docstring_info_check_parser(self):
    ## Test for parser to correctly scan docstring tokens and not custom fields
    """Field-like tokens embedded in prose must not be parsed as fields."""
    summary_info3_want = {
        'variables': {},
        'returns': [],
        'exceptions': []
    }
    summary3 = """
Union[int, None]: Expiration time in milliseconds for a partition.
If :attr:`partition_expiration` is set and <xref:type_> is
not set, :attr:`type_` will default to
:attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
It could return :param: with :returns as well.
"""
    summary_info3_got = {
        'variables': {},
        'returns': [],
        'exceptions': []
    }
    # Nothing should change
    top_summary3_want = summary3
    top_summary3_got = extension._extract_docstring_info(summary_info3_got, summary3, "")
    self.assertEqual(top_summary3_got, top_summary3_want)
    self.assertEqual(summary_info3_got, summary_info3_want)
def test_extract_docstring_info_check_error(self):
    ## Test for incorrectly formatted docstring raising error
    """Malformed field syntax raises ValueError; a bad Raises key raises KeyError."""
    summary4 = """
Description of docstring which should fail.
:returns:param:
"""
    with self.assertRaises(ValueError):
        extension._extract_docstring_info({}, summary4, "error string")
    summary5 = """
Description of malformed docstring.
Raises:
Error that should fail: if condition `x`.
"""
    with self.assertRaises(KeyError):
        extension._extract_docstring_info({}, summary5, "malformed docstring")
def test_extract_docstring_info_with_xref(self):
    ## Test with xref included in the summary, ensure they're processed as-is
    """Pre-rendered <xref> tags inside fields must pass through unchanged."""
    summary_info_want = {
        'variables': {
            'arg1': {
                'var_type': '<xref uid="google.spanner_v1.type.Type">Type</xref>',
                'description': 'simple description.'
            },
            'arg2': {
                'var_type': '~google.spanner_v1.type.dict',
                'description': 'simple description for `arg2`.'
            }
        },
        'returns': [
            {
                'var_type': '<xref uid="Pair">Pair</xref>',
                'description': 'simple description for return value.'
            }
        ],
        'exceptions': [
            {
                'var_type': '<xref uid="SpannerException">SpannerException</xref>',
                'description': 'if `condition x`.'
            }
        ]
    }
    summary = """
Simple test for docstring.
:type arg1: <xref uid="google.spanner_v1.type.Type">Type</xref>
:param arg1: simple description.
:param arg2: simple description for `arg2`.
:type arg2: ~google.spanner_v1.type.dict
:rtype: <xref uid="Pair">Pair</xref>
:returns: simple description for return value.
:raises <xref uid="SpannerException">SpannerException</xref>: if `condition x`.
"""
    summary_info_got = {
        'variables': {},
        'returns': [],
        'exceptions': []
    }
    top_summary_got = extension._extract_docstring_info(summary_info_got, summary, "")
    # Same as the top summary from previous example, compare with that
    self.assertEqual(top_summary_got, self.top_summary1_want)
    self.assertDictEqual(summary_info_got, summary_info_want)
def test_extract_docstring_info_no_summary(self):
    ## Test parsing docstring with no summary.
    """A docstring consisting only of field sections yields an empty top summary."""
    summary =(
        """Args:
arg1(int): simple description.
arg2(str): simple description for `arg2`.
Returns:
str: simple description for return value.
Raises:
AttributeError: if `condition x`.
"""
    )
    summary_info_got = {
        'variables': {},
        'returns': [],
        'exceptions': []
    }
    top_summary_got = extension._extract_docstring_info(summary_info_got, summary, "")
    self.assertEqual(top_summary_got, "")
    self.assertDictEqual(summary_info_got, self.summary_info1_want)
def test_find_package_group(self):
    """The package group is the first three dotted segments of a uid."""
    uid = "google.cloud.spanner_v1beta2.services.admin_database_v1.types"
    self.assertEqual(
        extension.find_package_group(uid),
        "google.cloud.spanner_v1beta2",
    )
def test_pretty_package_name(self):
    """Package groups render as title-cased names without the common prefix."""
    package_group = "google.cloud.spanner_v1beta2"
    self.assertEqual(
        extension.pretty_package_name(package_group),
        "Spanner V1beta2",
    )
def test_group_by_package(self):
    """Flat TOC entries are grouped under prettified per-package parent nodes."""
    # Expected tree: two package groups, each wrapping the original entries.
    toc_yaml_want = [
        {
            "name": "Spanner Admin Database V1",
            "uidname":"google.cloud.spanner_admin_database_v1",
            "items": [
                {
                    "name":"database_admin",
                    "uidname":"google.cloud.spanner_admin_database_v1.services.database_admin",
                    "items":[
                        {
                            "name":"Overview",
                            "uidname":"google.cloud.spanner_admin_database_v1.services.database_admin",
                            "uid":"google.cloud.spanner_admin_database_v1.services.database_admin"
                        },
                        {
                            "name":"ListBackupOperationsAsyncPager",
                            "uidname":"google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager",
                            "uid":"google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager"
                        }
                    ]
                },
                {
                    "name":"spanner_admin_database_v1.types",
                    "uidname":"google.cloud.spanner_admin_database_v1.types",
                    "items":[
                        {
                            "name":"Overview",
                            "uidname":"google.cloud.spanner_admin_database_v1.types",
                            "uid":"google.cloud.spanner_admin_database_v1.types"
                        },
                        {
                            "name":"BackupInfo",
                            "uidname":"google.cloud.spanner_admin_database_v1.types.BackupInfo",
                            "uid":"google.cloud.spanner_admin_database_v1.types.BackupInfo"
                        }
                    ]
                },
            ]
        },
        {
            "name": "Spanner V1",
            "uidname":"google.cloud.spanner_v1",
            "items": [
                {
                    "name":"pool",
                    "uidname":"google.cloud.spanner_v1.pool",
                    "items":[
                        {
                            "name":"Overview",
                            "uidname":"google.cloud.spanner_v1.pool",
                            "uid":"google.cloud.spanner_v1.pool"
                        },
                        {
                            "name":"AbstractSessionPool",
                            "uidname":"google.cloud.spanner_v1.pool.AbstractSessionPool",
                            "uid":"google.cloud.spanner_v1.pool.AbstractSessionPool"
                        }
                    ]
                }
            ]
        }
    ]
    # Input: the same entries as a flat list, spanning two packages.
    toc_yaml = [
        {
            "name":"database_admin",
            "uidname":"google.cloud.spanner_admin_database_v1.services.database_admin",
            "items":[
                {
                    "name":"Overview",
                    "uidname":"google.cloud.spanner_admin_database_v1.services.database_admin",
                    "uid":"google.cloud.spanner_admin_database_v1.services.database_admin"
                },
                {
                    "name":"ListBackupOperationsAsyncPager",
                    "uidname":"google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager",
                    "uid":"google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager"
                }
            ]
        },
        {
            "name":"spanner_admin_database_v1.types",
            "uidname":"google.cloud.spanner_admin_database_v1.types",
            "items":[
                {
                    "name":"Overview",
                    "uidname":"google.cloud.spanner_admin_database_v1.types",
                    "uid":"google.cloud.spanner_admin_database_v1.types"
                },
                {
                    "name":"BackupInfo",
                    "uidname":"google.cloud.spanner_admin_database_v1.types.BackupInfo",
                    "uid":"google.cloud.spanner_admin_database_v1.types.BackupInfo"
                }
            ]
        },
        {
            "name":"pool",
            "uidname":"google.cloud.spanner_v1.pool",
            "items":[
                {
                    "name":"Overview",
                    "uidname":"google.cloud.spanner_v1.pool",
                    "uid":"google.cloud.spanner_v1.pool"
                },
                {
                    "name":"AbstractSessionPool",
                    "uidname":"google.cloud.spanner_v1.pool.AbstractSessionPool",
                    "uid":"google.cloud.spanner_v1.pool.AbstractSessionPool"
                }
            ]
        }
    ]
    toc_yaml_got = extension.group_by_package(toc_yaml)
    self.assertCountEqual(toc_yaml_got, toc_yaml_want)
def test_parse_docstring_summary(self):
    # Check that the summary gets parsed correctly.
    """code-block directives become fenced markdown; notices become asides."""
    attributes_want = []
    summary_want = \
"""```python
from google.api_core.client_options import ClientOptions
from google.cloud.vision_v1 import ImageAnnotatorClient
def get_client_cert():
# code to load client certificate and private key.
return client_cert_bytes, client_private_key_bytes
options = ClientOptions(api_endpoint=\"foo.googleapis.com\",
client_cert_source=get_client_cert)
client = ImageAnnotatorClient(client_options=options)
```
You can also pass a mapping object.
```ruby
from google.cloud.vision_v1 import ImageAnnotatorClient
client = ImageAnnotatorClient(
client_options={
\"api_endpoint\": \"foo.googleapis.com\",
\"client_cert_source\" : get_client_cert
})
```
"""
    summary = \
"""
.. code-block:: python
\n from google.api_core.client_options import ClientOptions
\n from google.cloud.vision_v1 import ImageAnnotatorClient
\n def get_client_cert():
\n # code to load client certificate and private key.
\n return client_cert_bytes, client_private_key_bytes
\n options = ClientOptions(api_endpoint=\"foo.googleapis.com\",
\n client_cert_source=get_client_cert)
\n client = ImageAnnotatorClient(client_options=options)
You can also pass a mapping object.
\n.. code-block:: ruby
\n from google.cloud.vision_v1 import ImageAnnotatorClient
\n client = ImageAnnotatorClient(
\n client_options={
\n \"api_endpoint\": \"foo.googleapis.com\",
\n \"client_cert_source\" : get_client_cert
\n })
"""
    summary_got, attributes_got = extension._parse_docstring_summary(summary)
    self.assertEqual(summary_got, summary_want)
    self.assertEqual(attributes_got, attributes_want)
    # Check that nothing much changes otherwise.
    summary = \
"""
.. literalinclude::
note that these are not supported yet, so they will be ignored for now.
And any other documentation that the source code would have could go here.
"""
    summary_want = summary + "\n"
    summary_got, attributes_got = extension._parse_docstring_summary(summary)
    self.assertEqual(summary_got, summary_want)
    self.assertEqual(attributes_got, attributes_want)
    # Check that exception is raised if code block is not indented.
    summary = \
"""
.. code:: python
\nprint("This should throw an exception.")
\nfor i in range(10):
\n print(i)
"""
    with self.assertRaises(ValueError):
        extension._parse_docstring_summary(summary)
    # Check that notices are processed properly.
    summary_want = \
"""<aside class="note">
<b>Note:</b>
this is a note.
</aside>
<aside class="caution">
<b>Caution:</b>
another type of notice.
</aside>
<aside class="key-term">
<b>Key Term:</b>
hyphenated term notice.
</aside>"""
    summary = \
"""
.. note::
\n this is a note.
.. caution::
\n another type of notice.
.. key-term::
\n hyphenated term notice.
"""
    summary_got, attributes_got = extension._parse_docstring_summary(summary)
    self.assertEqual(summary_got, summary_want)
    self.assertEqual(attributes_got, attributes_want)
    # Check that exception is raised if block is not formatted properly.
    summary = \
"""
.. warning::
this is not a properly formatted warning.
"""
    with self.assertRaises(ValueError):
        extension._parse_docstring_summary(summary)
def test_parse_docstring_summary_attributes(self):
    # Test parsing docstring with attributes.
    """.. attribute:: directives are extracted into id/description/var_type dicts."""
    attributes_want = [
        {
            "id": "simple name",
            "description": "simple description",
            "var_type": 'str'
        }
    ]
    summary = \
"""
.. attribute:: simple name
\nsimple description
\n:type: str
"""
    summary_got, attributes_got = extension._parse_docstring_summary(summary)
    self.assertCountEqual(attributes_got, attributes_want)
    # Check multiple attributes are parsed.
    attributes_want = [
        {
            "id": "simple name",
            "description": "simple description",
            "var_type": "str"
        },
        {
            "id": "table_insert_request",
            "description": "Table insert request.",
            "var_type": "google.cloud.bigquery_logging_v1.types.TableInsertRequest"
        }
    ]
    summary = \
"""
.. attribute:: simple name
\nsimple description
\n:type: str
.. attribute:: table_insert_request
\nTable insert request.
\n:type: google.cloud.bigquery_logging_v1.types.TableInsertRequest
"""
    summary_got, attributes_got = extension._parse_docstring_summary(summary)
    self.assertCountEqual(attributes_got, attributes_want)
    for attribute_got, attribute_want in zip(attributes_got, attributes_want):
        self.assertDictEqual(attribute_got, attribute_want)
    # Check only attributes in valid format gets parsed.
    attributes_want = [
        {
            "id": "proper name",
            "description": "proper description.",
            "var_type": "str"
        }
    ]
    # First attribute below is malformed ("type:" instead of ":type:").
    summary = \
"""
.. attribute:: table_insert_request
\nTable insert request.
\ntype: google.cloud.bigquery_logging_v1.types.TableInsertRequest
.. attribute:: proper name
\nproper description.
\n:type: str
"""
    summary_got, attributes_got = extension._parse_docstring_summary(summary)
    # Check that we are returned only one item.
    self.assertCountEqual(attributes_got, attributes_want)
    for attribute_got, attribute_want in zip(attributes_got, attributes_want):
        self.assertDictEqual(attribute_got, attribute_want)
def test_merge_markdown_and_package_toc(self):
    """Markdown pages merge into the package TOC; root pages go to the top."""
    known_uids = {'acl','batch','blob','client','constants','fileio','hmac_key','notification','retry'}
    # Markdown pages keyed by the module they document ('/' = site root).
    markdown_pages = {
        'storage': [
            {'name': 'FileIO', 'href': 'fileio.md'},
            {'name': 'Retry', 'href': 'retry.md'},
            {'name': 'Notification', 'href': 'notification.md'},
            {'name': 'HMAC Key Metadata', 'href': 'hmac_key.md'},
            {'name': 'Batches', 'href': 'batch.md'},
            {'name': 'Constants', 'href': 'constants.md'},
            {'name': 'Storage Client', 'href': 'client.md'},
            {'name': 'Blobs / Objects', 'href': 'blobs.md'}
        ],
        'acl': [
            {'name': 'ACL', 'href': 'acl.md'},
            {'name': 'ACL guide', 'href': 'acl_guide.md'}
        ],
        '/': [
            {'name': 'Overview', 'href': 'index.md'},
            {'name': 'Changelog', 'href': 'changelog.md'}
        ],
    }
    pkg_toc_yaml = [
        {'name': 'Storage',
            'items': [
                {'name': 'acl', 'uid': 'google.cloud.storage.acl', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.acl'}]},
                {'name': 'batch', 'uid': 'google.cloud.storage.batch', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.batch'}]},
                {'name': 'blob', 'uid': 'google.cloud.storage.blob', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.blob'}]},
                {'name': 'bucket', 'uid': 'google.cloud.storage.bucket', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.bucket'}]},
                {'name': 'client', 'uid': 'google.cloud.storage.client', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.client'}]},
                {'name': 'constants', 'uid': 'google.cloud.storage.constants'},
                {'name': 'fileio', 'uid': 'google.cloud.storage.fileio', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.fileio'}]},
                {'name': 'hmac_key', 'uid': 'google.cloud.storage.hmac_key', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.hmac_key'}]},
                {'name': 'notification', 'uid': 'google.cloud.storage.notification', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.notification'}]},
                {'name': 'retry', 'uid': 'google.cloud.storage.retry', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.retry'}]},
            ]
        },
    ]
    added_pages, merged_pkg_toc_yaml = extension.merge_markdown_and_package_toc(
        pkg_toc_yaml, markdown_pages, known_uids)
    # Pages whose name is NOT a known uid (e.g. blobs.md, acl_guide.md) are
    # added as extra entries rather than replacing an existing module node.
    expected_added_pages = {'index.md', 'changelog.md', 'blobs.md', 'acl_guide.md'}
    expected_merged_pkg_toc_yaml = [
        {'name': 'Overview', 'href': 'index.md'},
        {'name': 'Changelog', 'href': 'changelog.md'},
        {'name': 'Storage',
            'items': [
                {'name': 'Blobs / Objects', 'href': 'blobs.md'},
                {'name': 'acl', 'uid': 'google.cloud.storage.acl', 'items': [
                    {'name': 'ACL guide', 'href': 'acl_guide.md'},
                    {'name': 'Overview', 'uid': 'google.cloud.storage.acl'},
                ]},
                {'name': 'batch', 'uid': 'google.cloud.storage.batch', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.batch'}]},
                {'name': 'blob', 'uid': 'google.cloud.storage.blob', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.blob'}]},
                {'name': 'bucket', 'uid': 'google.cloud.storage.bucket', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.bucket'}]},
                {'name': 'client', 'uid': 'google.cloud.storage.client', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.client'}]},
                {'name': 'constants', 'uid': 'google.cloud.storage.constants'},
                {'name': 'fileio', 'uid': 'google.cloud.storage.fileio', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.fileio'}]},
                {'name': 'hmac_key', 'uid': 'google.cloud.storage.hmac_key', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.hmac_key'}]},
                {'name': 'notification', 'uid': 'google.cloud.storage.notification', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.notification'}]},
                {'name': 'retry', 'uid': 'google.cloud.storage.retry', 'items': [{'name': 'Overview', 'uid': 'google.cloud.storage.retry'}]},
            ]
        },
    ]
    self.assertSetEqual(added_pages, expected_added_pages)
    self.assertListEqual(merged_pkg_toc_yaml, expected_merged_pkg_toc_yaml)
# Allow running this test module directly (python tests/test_unit.py).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "1181772448c8cd08042c0f6461f2b78c",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 203,
"avg_line_length": 36.36220472440945,
"alnum_prop": 0.5753572975313989,
"repo_name": "googleapis/sphinx-docfx-yaml",
"id": "ce5870bbfb379c3a69fda7cf7af409631344c774",
"size": "32326",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_unit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1964"
},
{
"name": "Python",
"bytes": "220912"
},
{
"name": "Shell",
"bytes": "29757"
}
],
"symlink_target": ""
} |
import logging
import sys
from django.urls import reverse
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode, smart_str
from desktop.lib.rest.http_client import RestException
from notebook.connectors.base import Api, QueryError, QueryExpired, OperationTimeout, OperationNotSupported
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
# Beeswax (the Hive app) is optional: keep the connector importable even when
# it is not installed, and only warn instead of failing at import time.
try:
  from beeswax.api import _autocomplete
  from beeswax.server import dbms
  from beeswax.server.dbms import get_query_server_config, QueryServerException
except ImportError as e:
  LOG.warning('Hive and HiveMetastoreServer interfaces are not enabled: %s' % e)
  # Sentinel checked elsewhere to detect that Hive support is unavailable.
  hive_settings = None
def query_error_handler(func):
  """Decorator translating backend errors into notebook query exceptions.

  ``StructuredException`` messages containing 'timed out' become
  ``OperationTimeout``; ``QueryServerException`` invalid/expired handles become
  ``QueryExpired``; everything else surfaces as ``QueryError``.
  """
  from functools import wraps  # local import keeps the module import block unchanged

  @wraps(func)  # fix: preserve the wrapped function's name/docstring for logging & debugging
  def decorator(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except StructuredException as e:
      message = force_unicode(str(e))
      if 'timed out' in message:
        raise OperationTimeout(e)
      else:
        raise QueryError(message)
    except QueryServerException as e:
      # NOTE(review): QueryServerException only exists when the beeswax import
      # above succeeded -- presumably these code paths are unreachable otherwise.
      message = force_unicode(str(e))
      if 'Invalid query handle' in message or 'Invalid OperationHandle' in message:
        raise QueryExpired(e)
      else:
        raise QueryError(message)
  return decorator
class HiveMetastoreApi(Api):
  """Notebook connector exposing autocomplete metadata from the Hive Metastore."""

  @query_error_handler
  def autocomplete(self, snippet, database=None, table=None, column=None, nested=None, operation=None):
    """Look up databases/tables/columns via the beeswax autocomplete helper."""
    hms_db = self._get_db(snippet, cluster=self.cluster)
    return _autocomplete(hms_db, database, table, column, nested, query=None, cluster=self.cluster)

  @query_error_handler
  def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
    """Sampling is not supported against the metastore; always return no rows."""
    return []

  def _get_db(self, snippet, is_async=False, cluster=None):
    """Return a dbms client bound to the 'hms' query server configuration."""
    return dbms.get(self.user, query_server=get_query_server_config(name='hms', cluster=cluster))
| {
"content_hash": "e961dd165d22a6546f5f3ef48a4d2999",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 109,
"avg_line_length": 31.26153846153846,
"alnum_prop": 0.7386811023622047,
"repo_name": "kawamon/hue",
"id": "0d56af630f2c49ab0e3b5548394dc6fa655bdc50",
"size": "2824",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/libs/notebook/src/notebook/connectors/hive_metastore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
} |
import requests
from time import time
from shutil import copyfileobj
from multiprocessing import Pool
from urllib.parse import urlparse
import sys
import traceback
# Downloader accepts a crawler and uses it to
# download files.
class Downloader:
    """Downloads images to ``base_dir`` and logs the resulting file locations.

    ``base_dir`` is used both as the image destination directory and as the
    prefix for the url log file.
    """

    def __init__(self, base_dir):
        self.base_dir = base_dir

    # TODO: use RLock to name incrementally rather than brute hash
    @staticmethod
    def generate_pic_name(pic_url):
        """Derive a local file name from the URL: '<hash>_<basename>'."""
        return str(hash(pic_url)) + "_" + urlparse(pic_url).path.split('/')[-1]

    def log_urls(self, downloaded_imgs):
        """Write the list of downloaded locations to a timestamped log file."""
        log_loc = self.base_dir + "imglog_" + str(time()) + ".log"
        with open(log_loc, 'w') as log_file:
            log_file.write("\n".join(downloaded_imgs))
        return log_loc

    @staticmethod
    def log_msg(msg):
        """Single sink for progress messages (currently stdout)."""
        print(msg)

    def download_pic(self, pic_url):
        """Download one image; return its local path, or "" on any failure."""
        image_loc = ""  # fix: was unbound (UnboundLocalError) on non-200 responses
        try:
            response = requests.get(pic_url, stream=True)
            if response.status_code == 200:
                image_loc = self.base_dir + '/' + self.generate_pic_name(pic_url)
                with open(image_loc, 'wb') as out_file:
                    out_file.write(response.content)
                self.log_msg("downloaded " + pic_url)
            else:
                print("Download failed. Response: " + str(response.status_code))
        except Exception:  # narrowed from bare except; still best-effort per url
            image_loc = ""
            self.log_msg("Failed to download image: " + pic_url + "\n")
            traceback.print_exc(limit=2, file=sys.stdout)
        return image_loc

    def download_batch(self, pic_urls, parallelism=4):
        """Download ``pic_urls`` in parallel; return the successful locations."""
        with Pool(parallelism) as downloader_pool:  # fix: close the pool when done
            results = downloader_pool.map(self.download_pic, pic_urls)
        # fix: keep successful downloads (the old predicate `loc == ""` kept
        # only the FAILED ones, so the log contained empty lines and the
        # caller got a list of empty strings).
        results = [loc for loc in results if loc != ""]
        self.log_urls(results)
        return results
| {
"content_hash": "1d2c188058f6e3997165d6f2eb4ea6c0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 90,
"avg_line_length": 37.327586206896555,
"alnum_prop": 0.584757505773672,
"repo_name": "apoorv-kumar/PyThugLife",
"id": "4e3793678721c0af3743b5d8adf61a928975e9d4",
"size": "2165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PicDownloader/downloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32544"
}
],
"symlink_target": ""
} |
from exception import *
from constants import *
from utils import *
| {
"content_hash": "ce1fa563d1c19f718fee5b90949609d4",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 23,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7794117647058824,
"repo_name": "kbandla/phoneypdf",
"id": "567ed815539fd173233fbde6425349b74fcb4462",
"size": "68",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdf/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "86886"
},
{
"name": "Python",
"bytes": "261806"
}
],
"symlink_target": ""
} |
import re
import sys
import json
import socket
from subprocess import check_output
from mpd import MPDClient
import humanize
def get_governor():
    """Return the scaling governor of cpu0 (all CPUs assumed to match)."""
    governor_path = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"
    with open(governor_path) as fp:
        return fp.readlines()[0].strip()
def dropbox_installed():
    """Return True when the ``dropbox`` CLI exists and answers ``status``."""
    try:
        check_output(["dropbox", "status"])
        return True
    except Exception:  # fix: bare except also swallowed SystemExit/KeyboardInterrupt
        return False
def get_dropbox_status():
    """Return dropbox's status lines joined with '|', or 'not installed'."""
    try:
        # fix: check_output returns bytes on Python 3; without decode(), the
        # str-argument replace() below raised TypeError and this function
        # ALWAYS fell through to "not installed".
        out = check_output(["dropbox", "status"]).decode("utf-8")
        return out.strip().replace("\n", "|")
    except Exception:  # CLI missing or non-zero exit -> treat as not installed
        return "not installed"
def get_current_kbmap():
    """Return the active xkb layout name (e.g. 'us'), or an error marker."""
    try:
        out = check_output(["setxkbmap", "-print"]).decode("utf-8")
        for line in out.splitlines():
            # The 'xkb_symbols' line looks like: include "pc+us+inet(evdev)" --
            # the layout is the second '+'-separated token.
            if line.find("symbols") > 0:
                _, layout, _ = line.split("+", 2)
                return layout
        return ""
    except Exception:  # fix: narrowed from bare except (setxkbmap missing, bad output)
        return "cannot parse kbd layout"
def get_current_nuvola_song():
    """Return '[state](+) artist - title' from nuvolaplayer3ctl, or '[n/a]'."""

    def str_to_bool(value):  # renamed: 'input' shadowed the builtin
        return {"true": True, "false": False}.get(value, False)

    try:
        # fix: check_output returns bytes on Python 3; without decode() the
        # state == "playing" comparison below was never true, so this always
        # reported "[n/a]".
        artist = check_output(["nuvolaplayer3ctl", "track-info", "artist"]).decode("utf-8").strip()
        title = check_output(["nuvolaplayer3ctl", "track-info", "title"]).decode("utf-8").strip()
        state = check_output(["nuvolaplayer3ctl", "track-info", "state"]).decode("utf-8").strip()
        thumbs_up = check_output(
            ["nuvolaplayer3ctl", "action-state", "thumbs-up"]
        ).decode("utf-8").strip()
        thumbsym = "(+)" if str_to_bool(thumbs_up) else ""
        if state == "playing" or state == "paused":
            return "[%s]%s %s - %s" % (state, thumbsym, artist, title)
        return "[n/a]"
    except Exception:  # player not installed / not running
        return "[n/a]"
# Module-level MPD connection, (re)created by mpd_reconnect().
mpd_client = None


def mpd_reconnect():
    """(Re)create the global MPD client and connect to localhost:6600.

    Returns True on success, False when the connection attempt failed.
    """
    global mpd_client
    client = MPDClient()
    client.timeout = 5
    client.idletimeout = None
    mpd_client = client
    try:
        client.connect("localhost", 6600)
    except:
        print("could not connect", file=sys.stderr)
        return False
    print("mpd reconnected", file=sys.stderr)
    return True
def get_mpd_song():
    """Get infos from mpd"""
    # Uses the module-level mpd_client; lazily reconnects when the ping fails.
    global mpd_client
    try:
        mpd_client.ping()
    except:
        c = mpd_reconnect()
        if not c:
            return None
    try:
        # Batch the commands in a single round trip; results come back in
        # order: [update, status, currentsong].
        mpd_client.command_list_ok_begin()
        mpd_client.update()
        mpd_client.status()
        mpd_client.currentsong()
        results = mpd_client.command_list_end()
        state = results[1]["state"]
        artist = None
        if "artist" in list(results[2].keys()):
            artist = results[2]["artist"]
            # Multiple artists can come back as a list; flatten for display.
            if isinstance(artist, list):
                artist = ", ".join(artist)
        title = None
        if "title" in list(results[2].keys()):
            title = results[2]["title"]
        if artist is None and title is not None:
            # assume #Musik stream: "artist - title | ..." packed in the title
            artist, title = title.split(" | ")[0].split(" - ")
        if state == "play" or state == "pause":
            return "[%s] %s - %s" % (state, artist, title)
        else:
            return None
    except:
        # Any protocol/parse failure: report "nothing playing".
        return None
    pass
# Last successfully read "now playing" string (module-level cache).
_prev = None


def get_gpmdp_song():
    """Return '[state] artist - title' from GPMDP's playback.json.

    Falls back to the last successful value (``_prev``) when the file is
    unreadable, and to None when nothing is playing.
    """
    import os

    global _prev
    try:
        # fix: build the path inside the try block -- os.environ["HOME"]
        # raises KeyError when HOME is unset, which previously escaped
        # uncaught and crashed the status loop.
        _json_info_path = os.path.join(
            os.environ["HOME"],
            ".config",
            "Google Play Music Desktop Player",
            "json_store",
            "playback.json",
        )
        with open(_json_info_path, "r") as _info_fh:
            _info = json.load(_info_fh)
        if not _info["song"]["title"]:
            _prev = None
            return None
        state = "play" if _info["playing"] else "paused"
        _prev = "[%s] %s - %s" % (
            state,
            _info["song"]["artist"],
            _info["song"]["title"],
        )
        return _prev
    except Exception:  # file missing/corrupt: keep showing the last known song
        return _prev
def _libvirt_get_running_vms():
"""Get number of currently running libvirt VMs"""
try:
import libvirt
except:
print("No libvirt installed", file=sys.stderr)
return None
conn = None
try:
conn = libvirt.openReadOnly(None)
except:
return None
if conn == None:
return None
return "libvirt VMs: %d" % conn.numOfDomains()
def _vbox_get_running_vms():
"""Get number of running vbox VMs"""
vbox_listrunningvms = None
try:
vbox_listrunningvms = check_output(["vboxmanage", "list", "runningvms"]).decode(
"utf-8"
)
except CalledProcessError as e:
print("check_output(['vboxmanage', ...]):", file=sys.stderr)
e.print_stack_trace()
return None
if vbox_listrunningvms is None:
return None
vbox_listrunning_lines = vbox_listrunningvms.strip().split("\n")
if vbox_listrunning_lines[0] == "":
return "VBox VMs: 0"
return "VBox VMs: %d" % len(vbox_listrunning_lines)
def get_running_vms(j):
    """Prepend one status entry per hypervisor that reports running VMs."""
    for entry_name, message in (
        ("libvirt_vms", _libvirt_get_running_vms()),
        ("vbox_vms", _vbox_get_running_vms()),
    ):
        if message:
            j.insert(0, {"full_text": message, "name": entry_name})
# Byte counters from the previous sample (5 s i3status interval).
rx_prev = 0
tx_prev = 0


def net_speed(j, iface="enp5s0"):
    """Prepend rx/tx throughput for *iface* to the status entries in ``j``.

    Generalized: the interface name is now a parameter (default keeps the old
    hard-coded behavior). Does nothing when the interface is absent.
    Inspired by https://github.com/wardi/speedometer/blob/191ac78fd0cf08fc88ccb9c431fc2e53ae470f91/speedometer.py#L599-L620
    """
    global rx_prev, tx_prev
    r = re.compile(r"^\s*" + re.escape(iface) + r":(.*)$", re.MULTILINE)
    with open("/proc/net/dev") as f:  # fix: close the file deterministically
        dev_lines = f.read()
    match = r.search(dev_lines)
    if not match:
        return
    parts = match.group(1).split()
    rx_now = int(parts[0])  # /proc/net/dev column 1: received bytes
    tx_now = int(parts[8])  # column 9: transmitted bytes
    text = "RX ?, TX ?"
    # fix: counters can reset (interface bounce, overflow); the old `assert`
    # crashed the whole status bar in that case and vanishes under -O.
    if rx_prev != 0 and rx_now >= rx_prev and tx_now >= tx_prev:
        rx_speed = humanize.naturalsize((rx_now - rx_prev) / 5, binary=True).replace("Bytes", "B")
        tx_speed = humanize.naturalsize((tx_now - tx_prev) / 5, binary=True).replace("Bytes", "B")
        text = f"rx: {rx_speed}/s, tx: {tx_speed}/s"
    rx_prev = rx_now
    tx_prev = tx_now
    j.insert(0, {"full_text": text, "name": "netspeed"})
def print_line(message):
    """Write *message* plus newline to stdout immediately (i3bar needs unbuffered output)."""
    print(message, flush=True)
def read_line():
    """Read one stripped line from stdin, respecting interrupts.

    Exits with status 3 on EOF or an empty line (i3status signals shutdown
    this way) and exits quietly on Ctrl-C.
    """
    try:
        line = sys.stdin.readline().strip()
    # exit on ctrl-c
    except KeyboardInterrupt:
        sys.exit()
    # i3status sends EOF, or an empty line
    if not line:
        sys.exit(3)
    return line
if __name__ == "__main__":
    # connect to MPD
    mpd_reconnect()
    # Skip the first line which contains the version header.
    print_line(read_line())
    # The second line contains the start of the infinite array.
    print_line(read_line())
    # Short hostname (domain part stripped) selects host-specific widgets below.
    hostname = socket.gethostname().split(".")[0]
    while True:
        line, prefix = read_line(), ""
        # ignore comma at start of lines
        if line.startswith(","):
            line, prefix = line[1:], ","
        j = json.loads(line)
        # insert information into the start of the json, but could be anywhere
        # CHANGE THIS LINE TO INSERT SOMETHING ELSE
        # j.insert(0, {'full_text' : '%s' % get_governor(), 'name' : 'gov'})
        if dropbox_installed():
            j.insert(
                0,
                {"full_text": "Dropbox: %s" % get_dropbox_status(), "name": "dropbox"},
            )
        if hostname == "phoenix":
            net_speed(j)
        j.insert(0, {"full_text": "layout: %s" % get_current_kbmap(), "name": "kbmap"})
        # Music widgets only on these two hosts.
        if hostname in ["wyvern", "phoenix"]:
            mpd_msg = get_mpd_song()
            if mpd_msg:
                j.insert(0, {"full_text": mpd_msg, "name": "mpd"})
            gpmdp_msg = get_gpmdp_song()
            if gpmdp_msg:
                j.insert(0, {"full_text": gpmdp_msg, "name": "gpmdp"})
        get_running_vms(j)
        # and echo back new encoded json
        print_line(prefix + json.dumps(j))
| {
"content_hash": "36423717c2e0c81b00f7cc53b88a7716",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 123,
"avg_line_length": 28.321070234113712,
"alnum_prop": 0.5520784128483703,
"repo_name": "simu/dotfiles",
"id": "af04b4a0958a5119fe6cdf7c86808e1dc9603fe3",
"size": "9515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i3wm/i3status-wrapper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15810"
},
{
"name": "Jinja",
"bytes": "23905"
},
{
"name": "Python",
"bytes": "36266"
},
{
"name": "Shell",
"bytes": "20614"
},
{
"name": "Vim script",
"bytes": "576963"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append(os.path.abspath('_themes'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'smother'
copyright = '2016, Chris Beaumont'
author = 'Chris Beaumont'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'smother'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'smother v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'smotherdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'smother.tex', 'smother Documentation',
'Chris Beaumont', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'smother', 'smother Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'smother', 'smother Documentation',
author, 'smother', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "986a01b5caf444174b9dd1ce9f72a56d",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 80,
"avg_line_length": 28.33228840125392,
"alnum_prop": 0.6866563398982075,
"repo_name": "ChrisBeaumont/smother",
"id": "3aa0e162c82148e704565d7be87031dde858548a",
"size": "9721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1487"
},
{
"name": "Python",
"bytes": "48372"
}
],
"symlink_target": ""
} |
"""Use the Doctest plugin with --with-doctest or the NOSE_WITH_DOCTEST
environment variable to enable collection and execution of doctests. doctest_
tests are usually included in the tested package, not grouped into packages or
modules of their own. For this reason, nose will try to detect and run doctest
tests only in the non-test packages it discovers in the working
directory. Doctests may also be placed into files other than python modules,
in which case they can be collected and executed by using the
--doctest-extension switch or NOSE_DOCTEST_EXTENSION environment variable to
indicate which file extension(s) to load.
doctest tests are run like any other test, with the exception that output
capture does not work, because doctest does its own output capture in the
course of running a test.
This module also includes a specialized version of nose.run() that
makes it easier to write doctests that test test runs.
.. _doctest: http://docs.python.org/lib/module-doctest.html
"""
from __future__ import generators
import logging
import os
from inspect import getmodule
from nose.plugins.base import Plugin
from nose.util import anyp, getpackage, test_address, resolve_name, tolist
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
log = logging.getLogger(__name__)
try:
    import doctest
    doctest.DocTestCase
    # system version of doctest is acceptable, but needs a monkeypatch
except (ImportError, AttributeError):
    # system version is too old
    import nose.ext.dtcompat as doctest
#
# Doctest and coverage don't get along, so we need to create
# a monkeypatch that will replace the part of doctest that
# interferes with coverage reports.
#
# The monkeypatch is based on this zope patch:
# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
#
_orp = doctest._OutputRedirectingPdb
class NoseOutputRedirectingPdb(_orp):
    """_OutputRedirectingPdb variant that only tears down the trace hook
    when the debugger was actually entered, so coverage's settrace-based
    instrumentation survives doctest runs that never invoke pdb.
    """
    def __init__(self, out):
        # Tracks whether set_trace() was ever called on this instance.
        self.__debugger_used = False
        _orp.__init__(self, out)
    def set_trace(self):
        self.__debugger_used = True
        _orp.set_trace(self)
    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            _orp.set_continue(self)
doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
class Doctest(Plugin):
    """
    Activate doctest plugin to find and run doctests in non-test modules.
    """
    extension = None
    def options(self, parser, env=os.environ):
        """Register --doctest-tests and --doctest-extension command options."""
        Plugin.options(self, parser, env)
        parser.add_option('--doctest-tests', action='store_true',
                          dest='doctest_tests',
                          default=env.get('NOSE_DOCTEST_TESTS'),
                          help="Also look for doctests in test modules. "
                          "Note that classes, methods and functions should "
                          "have either doctests or non-doctest tests, "
                          "not both. [NOSE_DOCTEST_TESTS]")
        parser.add_option('--doctest-extension', action="append",
                          dest="doctestExtension",
                          help="Also look for doctests in files with "
                          "this extension [NOSE_DOCTEST_EXTENSION]")
        # Set the default as a list, if given in env; otherwise
        # an additional value set on the command line will cause
        # an error.
        env_setting = env.get('NOSE_DOCTEST_EXTENSION')
        if env_setting is not None:
            parser.set_defaults(doctestExtension=tolist(env_setting))
    def configure(self, options, config):
        """Copy the parsed options into plugin state and build the finder."""
        Plugin.configure(self, options, config)
        self.doctest_tests = options.doctest_tests
        try:
            self.extension = tolist(options.doctestExtension)
        except AttributeError:
            # 2.3, no other-file option
            self.extension = None
        self.finder = doctest.DocTestFinder()
    def loadTestsFromModule(self, module):
        """Yield a DocTestCase for every non-empty doctest found in *module*."""
        if not self.matches(module.__name__):
            log.debug("Doctest doesn't want module %s", module)
            return
        tests = self.finder.find(module)
        if not tests:
            return
        tests.sort()
        module_file = module.__file__
        # Point filenames at the .py source rather than the compiled file.
        if module_file[-4:] in ('.pyc', '.pyo'):
            module_file = module_file[:-1]
        for test in tests:
            if not test.examples:
                continue
            if not test.filename:
                test.filename = module_file
            yield DocTestCase(test)
    def loadTestsFromFile(self, filename):
        """Yield doctests from a non-module file matching --doctest-extension."""
        if self.extension and anyp(filename.endswith, self.extension):
            name = os.path.basename(filename)
            dh = open(filename)
            try:
                doc = dh.read()
            finally:
                dh.close()
            parser = doctest.DocTestParser()
            test = parser.get_doctest(
                doc, globs={'__file__': filename}, name=name,
                filename=filename, lineno=0)
            if test.examples:
                yield DocFileCase(test)
            else:
                yield False # no tests to load
    def makeTest(self, obj, parent):
        """Look for doctests in the given object, which will be a
        function, method or class.
        """
        doctests = self.finder.find(obj, module=getmodule(parent))
        if doctests:
            for test in doctests:
                if len(test.examples) == 0:
                    continue
                yield DocTestCase(test, obj=obj)
    def matches(self, name):
        """Doctest wants only non-test modules in general.
        """
        # FIXME this seems wrong -- nothing is ever going to
        # fail this test, since we're given a module NAME not FILE
        if name == '__init__.py':
            return False
        # FIXME don't think we need include/exclude checks here?
        return ((self.doctest_tests or not self.conf.testMatch.search(name)
                 or (self.conf.include
                     and filter(None,
                                [inc.search(name)
                                 for inc in self.conf.include])))
                and (not self.conf.exclude
                     or not filter(None,
                                   [exc.search(name)
                                    for exc in self.conf.exclude])))
    def wantFile(self, file):
        """Accept .py files, plus any file matching --doctest-extension."""
        # always want .py files
        if file.endswith('.py'):
            return True
        # also want files that match my extension
        if (self.extension
            and anyp(file.endswith, self.extension)
            and (not self.conf.exclude
                 or not filter(None,
                               [exc.search(file)
                                for exc in self.conf.exclude]))):
            return True
        return None
class DocTestCase(doctest.DocTestCase):
    """Overrides DocTestCase to
    provide an address() method that returns the correct address for
    the doctest case. To provide hints for address(), an obj may also
    be passed -- this will be used as the test object for purposes of
    determining the test address, if it is provided.
    """
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, obj=None):
        self._nose_obj = obj
        # NOTE(review): the setUp/tearDown/checker arguments are accepted but
        # NOT forwarded -- literal None is passed to the superclass instead.
        # Looks like an upstream bug; confirm before relying on these params.
        super(DocTestCase, self).__init__(
            test, optionflags=optionflags, setUp=None, tearDown=None,
            checker=None)
    def address(self):
        if self._nose_obj is not None:
            return test_address(self._nose_obj)
        return test_address(resolve_name(self._dt_test.name))
    # doctests loaded via find(obj) omit the module name
    # so we need to override id, __repr__ and shortDescription
    # bonus: this will squash a 2.3 vs 2.4 incompatibility
    def id(self):
        name = self._dt_test.name
        filename = self._dt_test.filename
        if filename is not None:
            # Prepend the containing package so ids are fully qualified.
            pk = getpackage(filename)
            if not name.startswith(pk):
                name = "%s.%s" % (pk, name)
        return name
    def __repr__(self):
        name = self.id()
        name = name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return 'Doctest: %s' % self.id()
class DocFileCase(doctest.DocFileCase):
    """Overrides to provide address() method that returns the correct
    address for the doc file case.
    """
    def address(self):
        # A doc file test is addressed purely by its filename; there is
        # no containing callable or module object.
        return (self._dt_test.filename, None, None)
def run(*arg, **kw):
    """DEPRECATED: moved to nose.plugins.plugintest.
    """
    import warnings
    warnings.warn(
        "run() has been moved to nose.plugins.plugintest. Please "
        "update your imports.",
        category=DeprecationWarning,
        stacklevel=2,
    )
    from nose.plugins.plugintest import run as _delegate
    _delegate(*arg, **kw)
| {
"content_hash": "b90b2f55c433e47eda7233ad8a761b41",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 89,
"avg_line_length": 37.77272727272727,
"alnum_prop": 0.5971994311344492,
"repo_name": "santisiri/popego",
"id": "8fe9854b73f292caa2fd306d0794c550db8c6a66",
"size": "9141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/nose-0.10.1-py2.5.egg/nose/plugins/doctests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
import sys
import os
# Following function code snippet was found on StackOverflow (with a change to lower
# camel-case on the variable names):
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def find_executable(program):
    """Return the path of *program* if it is an executable file, else None.

    If *program* carries a directory component it is checked directly;
    otherwise every entry of the PATH environment variable is searched.
    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        return program if _is_executable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _is_executable(candidate):
            return candidate
    return None
# Mapping from MSVC major version number to the Visual Studio product year.
_VS_VERSION_YEARS = {
    8: 2005,
    9: 2008,
    10: 2010,
    11: 2012,
    12: 2013,
    14: 2015,
}


def determine_year(version):
    """Return the Visual Studio product year for an MSVC major *version*.

    Unknown versions map to 0 (the original's ``0000`` literal was just the
    integer zero), which downstream callers treat as "could not determine".
    """
    return _VS_VERSION_YEARS.get(version, 0)
# Determine if msbuild is in the path, then call it to determine the version and parse
# it into a format we can use, which is "<version_num> <version_year>".
# Determine if msbuild is in the path, then call it to determine the version
# and parse it into a format we can use, which is "<version_num> <version_year>".
if __name__ == '__main__':
    exeName = 'msbuild.exe'
    versionCall = exeName + ' /ver'
    # Determine if the executable exists in the path, this is critical.
    #
    foundExeName = find_executable(exeName)
    # If not found, return an invalid number but in the appropriate format so it will
    # fail if the program above tries to use it.
    if foundExeName == None:
        print('00 0000')
        print('Executable ' + exeName + ' not found in PATH!')
    else:
        # Run "msbuild.exe /ver" and capture its output for parsing.
        sysCallOut = os.popen(versionCall).read()
        version = None
        # Split around any spaces first
        spaceList = sysCallOut.split(' ')
        for spaceString in spaceList:
            # If we've already found it, bail.
            if version != None:
                break
            # Now split around line feeds
            lineList = spaceString.split('\n')
            for curLine in lineList:
                # If we've already found it, bail.
                if version != None:
                    break
                # We only want to continue if there's a period in the list
                if '.' not in curLine:
                    continue
                # Get the first element and determine if it is a number, if so, we've
                # got our number.
                splitAroundPeriod = curLine.split('.')
                if splitAroundPeriod[0].isdigit():
                    version = int (splitAroundPeriod[0])
                    break
        # Failsafe to return a number in the proper format, but one that will fail.
        if version == None:
            version = 00
        # Determine the year associated with that version
        year = determine_year(version)
        # Output the string we need for Cmake to properly build for this version
        print(str(version) + ' ' + str(year))
| {
"content_hash": "2cf6a9319cc987b884af483834c404a4",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 86,
"avg_line_length": 31.705263157894738,
"alnum_prop": 0.5773572377158035,
"repo_name": "Radamanthe/VulkanSamples",
"id": "a86329f8b12ae8b30da6bf9736c428924b1eae47",
"size": "3743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "determine_vs_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26152"
},
{
"name": "C",
"bytes": "2609031"
},
{
"name": "C++",
"bytes": "9935652"
},
{
"name": "CMake",
"bytes": "86637"
},
{
"name": "GLSL",
"bytes": "8747"
},
{
"name": "HTML",
"bytes": "31433"
},
{
"name": "JavaScript",
"bytes": "16881"
},
{
"name": "M4",
"bytes": "19093"
},
{
"name": "Makefile",
"bytes": "85475"
},
{
"name": "NSIS",
"bytes": "27655"
},
{
"name": "PowerShell",
"bytes": "32236"
},
{
"name": "Python",
"bytes": "793498"
},
{
"name": "Shell",
"bytes": "359605"
}
],
"symlink_target": ""
} |
"""Operator Entity."""
import geom
import util
import errors
from entity import Entity
from stop import Stop
from route import Route
def sorted_onestop(entities):
    """Return *entities* as a list ordered by their Onestop ID."""
    def _onestop_key(entity):
        return entity.onestop()
    return sorted(entities, key=_onestop_key)
class Operator(Entity):
  """Transitland Operator Entity."""
  onestop_type = 'o'
  def init(self, **data):
    # Operator-specific init hook: timezone is pulled out of the raw data.
    self.timezone = data.pop('timezone', None)
  def geohash(self):
    """Geohash covering all of this operator's stops."""
    return geom.geohash_features(self.stops())
  def _cache_onestop(self):
    """Compute and cache Onestop IDs for self and all child routes/stops."""
    key = 'onestopId'
    self.data[key] = self.data.get(key) or self.make_onestop()
    for i in self.routes():
      i.data[key] = i.data.get(key) or i.make_onestop()
    for i in self.stops():
      i.data[key] = i.data.get(key) or i.make_onestop()
  def add_tags_gtfs(self, gtfs_entity):
    """Copy selected GTFS agency attributes onto this entity as tags."""
    keys = [
      'agency_url',
      'agency_phone',
      'agency_lang',
      'agency_fare_url',
      'agency_id'
    ]
    data = gtfs_entity.data._asdict()
    # Timezone is stored as an attribute, not a tag.
    self.timezone = data.pop('agency_timezone', None)
    for key in keys:
      if key in data:
        self.set_tag(key, data[key])
  @classmethod
  def from_json(cls, data):
    """Load Operator from GeoJSON."""
    agency = cls(**data)
    # Add stops
    stops = {}
    for feature in data['features']:
      if feature['onestopId'].startswith('s'):
        stop = Stop.from_json(feature)
        stops[stop.onestop()] = stop
    # Add routes
    for feature in data['features']:
      if feature['onestopId'].startswith('r'):
        route = Route.from_json(feature)
        # Get stop by id, add as child.
        for stop in feature['serves']:
          route.pclink(route, stops[stop])
        agency.pclink(agency, route)
    return agency
  def json(self):
    """Serialize this operator and its children as a GeoJSON FeatureCollection."""
    return {
      'type': 'FeatureCollection',
      'geometry': self.geometry(),
      'properties': {},
      'name': self.name(),
      'tags': self.tags(),
      'timezone': self.timezone,
      'onestopId': self.onestop(),
      'identifiers': sorted(self.identifiers()),
      'serves': sorted(self.serves()),
      'features': [
        i.json() for i in sorted_onestop(self.routes() | self.stops())
      ]
    }
  # Graph
  def serves(self):
    """Onestop IDs of all stops served, including any listed in raw data."""
    ret = set([i.onestop() for i in self.stops()])
    ret |= set(self.data.get('serves', []))
    return ret
  def routes(self):
    """Child routes of this operator."""
    return set(self.children)
  def route(self, onestop_id):
    """Return a single route by Onestop ID."""
    return util.filtfirst(self.routes(), onestop=onestop_id)
  def stops(self):
    """Union of stops across all child routes."""
    stops = set()
    for i in self.routes():
      stops |= i.stops()
    return stops
  def stop(self, onestop_id):
    """Return a single stop by Onestop ID."""
    return util.filtfirst(self.stops(), onestop=onestop_id)
| {
"content_hash": "bbc5116378d44fc28a2a5accf830b9bf",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 70,
"avg_line_length": 26.77227722772277,
"alnum_prop": 0.6042899408284024,
"repo_name": "srthurman/transitland-python-client",
"id": "9d02c688827d244371c78dd2a0cc8c148e3fad80",
"size": "2704",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "transitland/operator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48540"
}
],
"symlink_target": ""
} |
"""Unit tests for test/portage module."""
from __future__ import print_function
import pytest # pylint: disable=import-error
import chromite as cr
# Pytest's method of declaring fixtures causes Pylint to complain about
# redefined outer names.
# pylint: disable=redefined-outer-name
# One parametrization value per possible overlay stack height: 1..len(HIERARCHY_NAMES).
_OVERLAY_STACK_PARAMS = list(range(1, len(cr.test.Overlay.HIERARCHY_NAMES) + 1))
@pytest.mark.parametrize('height', _OVERLAY_STACK_PARAMS)
def test_overlay_stack_masters(height, overlay_stack):
    """Test that overlays have the correct masters set."""
    overlays = list(overlay_stack(height))
    # The bottom overlay has no masters; every overlay above it lists all
    # overlays below it, in order.
    assert overlays[0].masters is None
    for x in range(1, height):
        assert overlays[x].masters == tuple(overlays[:x])
@pytest.mark.parametrize('height', _OVERLAY_STACK_PARAMS)
def test_overlay_stack_names(height, overlay_stack):
    """Test that generated overlays have the expected names."""
    overlays = overlay_stack(height)
    # Names must follow HIERARCHY_NAMES positionally.
    for i, o in enumerate(overlays):
        assert o.name == cr.test.Overlay.HIERARCHY_NAMES[i]
@pytest.fixture
def minimal_sysroot(overlay_stack, tmp_path_factory):
    """Set up a barebones sysroot with a single associated overlay."""
    # Single-element unpack doubles as an assertion the stack has height 1.
    overlay, = overlay_stack(1)
    path = tmp_path_factory.mktemp('minimal-sysroot')
    base = overlay.create_profile()
    return overlay, cr.test.Sysroot(path, base, overlays=[overlay])
def test_emerge_against_fake_sysroot(minimal_sysroot):
    """Test that a basic `emerge` operation works against a test sysroot."""
    overlay, sysroot = minimal_sysroot
    pkg1 = cr.test.Package('foo', 'bar')
    overlay.add_package(pkg1)
    # foo/spam depends on foo/bar, so emerging spam must also install bar.
    pkg2 = cr.test.Package('foo', 'spam', depend='foo/bar')
    overlay.add_package(pkg2)
    sysroot.run(['emerge', 'foo/spam'])
    res = sysroot.run(['equery', 'list', '*'], stdout=True)
    assert 'foo/bar' in res.stdout
| {
"content_hash": "843df700b2094f4947ab021442cee8f3",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 30.775862068965516,
"alnum_prop": 0.7198879551820728,
"repo_name": "endlessm/chromium-browser",
"id": "56851f185d067fcf9c3cc8b0cd13146472aa2890",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/test/portage_testables_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django import forms
from django.core.exceptions import ValidationError
from wye.profiles.models import UserType
from . import models
class RegionalLeadForm(forms.ModelForm):
    """ModelForm for RegionalLead that validates lead/location consistency
    and tags saved leads with the 'lead' user type."""

    class Meta:
        model = models.RegionalLead
        exclude = ()

    def clean(self):
        """Ensure every selected lead has a profile in the chosen location."""
        error_message = []
        if (self.cleaned_data.get('location', '') and
                self.cleaned_data.get('leads', '')):
            location = self.cleaned_data['location']
            for u in self.cleaned_data['leads']:
                if not u.profile:
                    error_message.append('Profile for user %s not found' % (u))
                elif u.profile.location != location:
                    error_message.append(
                        "User %s doesn't belong to region %s" % (u, location))
            if error_message:
                raise ValidationError(error_message)
        # Django convention: return the cleaned data (original returned None,
        # which Django tolerates but which surprises subclasses).
        return self.cleaned_data

    def save(self, force_insert=False, force_update=False, commit=True):
        """Save the instance and mark every selected user as a 'lead'.

        Bug fix: the original always called ``super().save()`` with the
        default ``commit=True``, silently ignoring ``commit=False``.
        """
        m = super(RegionalLeadForm, self).save(commit=commit)
        # Hoist the UserType lookup out of the loop: one query, not one
        # per selected lead.
        lead_type = UserType.objects.get(slug='lead')
        for u in self.cleaned_data['leads']:
            u.profile.usertype.add(lead_type)
        return m
class LocationForm(forms.ModelForm):
    """Plain ModelForm exposing every Location field (no exclusions)."""
    class Meta:
        model = models.Location
        exclude = ()
class StateForm(forms.ModelForm):
    """Plain ModelForm exposing every State field (no exclusions)."""
    class Meta:
        model = models.State
        exclude = ()
| {
"content_hash": "c7fcd4de8ea27c7d5b0c434ea1487cbb",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 28.617021276595743,
"alnum_prop": 0.5910780669144982,
"repo_name": "DESHRAJ/wye",
"id": "96f74e8b6f1ebb99fcb65e80b466131129ba8797",
"size": "1345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wye/regions/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19955"
},
{
"name": "HTML",
"bytes": "286868"
},
{
"name": "JavaScript",
"bytes": "26849"
},
{
"name": "Python",
"bytes": "125659"
},
{
"name": "Shell",
"bytes": "248"
}
],
"symlink_target": ""
} |
__author__ = 'oshikiri'
__email__ = 't.oshikiri.0137@gmail.com'
__date__ = '2015-02-19'
import os
import sys
from ShiriMas import *
if __name__ == '__main__':
    db_path = './db/shiritori-history.sqlite3'
    table_name = 'history'
    sbot = ShiriMas('shiritori-master', db_path, table_name)
    ## Most recent message on Slack
    slack_newest_message = sbot.get_slack_newest_message()
    slack_newest_text = slack_newest_message['text']
    slack_newest_yomi = yomi_shiritori(slack_newest_text)
    slack_newest_user = slack_newest_message.get('username')
    ## Most recent message stored in the DB
    db_newest_message = sbot.get_db_newest_message()
    ## Append the messages received since the last DB entry to the DB
    sbot.append_messages(slack_newest_message, db_newest_message)
    ## Compute and post the shiritori answer
    ans = sbot.get_ans(slack_newest_yomi)
    sbot.post_shiritori(ans, slack_newest_user, slack_newest_yomi)
| {
"content_hash": "d6aa835e29c9a66f34ac0dcbb32651ac",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 25.757575757575758,
"alnum_prop": 0.6741176470588235,
"repo_name": "oshikiri/shirimas",
"id": "db683f0b1bc35dd7573ec03d2223c8d8a23482e5",
"size": "987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main_shiritori.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10789"
}
],
"symlink_target": ""
} |
"""
Simple returner for Couchbase. Optional configuration
settings are listed below, along with sane defaults.
.. code-block:: yaml
couchbase.host: 'salt'
couchbase.port: 8091
couchbase.bucket: 'salt'
couchbase.ttl: 24
couchbase.password: 'password'
couchbase.skip_verify_views: False
To use the couchbase returner, append '--return couchbase' to the salt command. ex:
.. code-block:: bash
salt '*' test.ping --return couchbase
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return couchbase --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return couchbase --return_kwargs '{"bucket": "another-salt"}'
All of the return data will be stored in documents as follows:
JID
===
load: load obj
tgt_minions: list of minions targeted
nocache: should we not cache the return data
JID/MINION_ID
=============
return: return_data
full_ret: full load of job return
"""
import logging
import salt.utils.jid
import salt.utils.json
import salt.utils.minions
try:
    import couchbase
    HAS_DEPS = True
except ImportError:
    # __virtual__() uses this flag to disable the returner when the
    # couchbase client library is not installed.
    HAS_DEPS = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "couchbase"
# some globals
COUCHBASE_CONN = None  # lazily-created connection cached by _get_connection()
DESIGN_NAME = "couchbase_returner"  # design document that holds our views
VERIFIED_VIEWS = False  # flipped to True once _verify_views() has succeeded
_json = salt.utils.json.import_json()  # JSON impl shared with the converters
def _json_dumps(obj, **kwargs):
    """
    Serialize ``obj`` with the module-level ``_json`` implementation.

    Registered with ``couchbase.set_json_converters`` in ``__virtual__``.
    Fix: keyword arguments were previously accepted but silently discarded;
    they are now forwarded to the serializer so callers' options take effect.
    """
    return salt.utils.json.dumps(obj, _json_module=_json, **kwargs)
def __virtual__():
    """Load this returner only when the couchbase client library is present."""
    if HAS_DEPS:
        # Install our JSON converters so couchbase round-trips salt data.
        couchbase.set_json_converters(_json_dumps, salt.utils.json.loads)
        return __virtualname__
    return False, "Could not import couchbase returner; couchbase is not installed."
def _get_options():
    """
    Assemble couchbase connection options from ``__opts__``, applying
    defaults for any option the master config does not set.
    """
    defaults = (
        ("host", "salt"),
        ("port", 8091),
        ("bucket", "salt"),
        ("password", ""),
    )
    return {
        name: __opts__.get("couchbase.{}".format(name), fallback)
        for name, fallback in defaults
    }
def _get_connection():
    """
    Return the module-level couchbase connection, creating (and caching)
    it on first use.
    """
    global COUCHBASE_CONN
    if COUCHBASE_CONN is None:
        opts = _get_options()
        connect_kwargs = {
            "host": opts["host"],
            "port": opts["port"],
            "bucket": opts["bucket"],
        }
        # Only pass a password when one is configured.
        if opts["password"]:
            connect_kwargs["password"] = opts["password"]
        COUCHBASE_CONN = couchbase.Couchbase.connect(**connect_kwargs)
    return COUCHBASE_CONN
def _verify_views():
    """
    Verify that you have the views you need. This can be disabled by
    adding couchbase.skip_verify_views: True in config
    """
    global VERIFIED_VIEWS
    # Run at most once per process; skip entirely when configured off.
    if VERIFIED_VIEWS or __opts__.get("couchbase.skip_verify_views", False):
        return
    cb_ = _get_connection()
    ddoc = {
        "views": {
            # jids: top-level job docs (id contains no '/') that carry a load
            "jids": {
                "map": (
                    "function (doc, meta) { if (meta.id.indexOf('/') === -1 &&"
                    " doc.load){ emit(meta.id, null) } }"
                )
            },
            # jid_returns: per-minion return docs with ids of the form
            # '<jid>/<minion_id>'; emits (jid, minion_id)
            "jid_returns": {
                "map": (
                    "function (doc, meta) { if (meta.id.indexOf('/') > -1){ key_parts ="
                    " meta.id.split('/'); emit(key_parts[0], key_parts[1]); } }"
                )
            },
        }
    }
    try:
        # If the design document already matches, nothing to do.
        curr_ddoc = cb_.design_get(DESIGN_NAME, use_devmode=False).value
        if curr_ddoc["views"] == ddoc["views"]:
            VERIFIED_VIEWS = True
            return
    except couchbase.exceptions.HTTPError:
        # Design doc missing: fall through and create it.
        pass
    cb_.design_create(DESIGN_NAME, ddoc, use_devmode=False)
    VERIFIED_VIEWS = True
def _get_ttl():
    """Document TTL in seconds; the couchbase.ttl option is expressed in hours."""
    hours = __opts__.get("couchbase.ttl", 24)
    return hours * 3600  # keep_jobs is in hours
# TODO: add to returner docs-- this is a new one
def prep_jid(nocache=False, passed_jid=None):
    """
    Return a job id and prepare the job id directory
    This is the function responsible for making sure jids don't collide (unless
    its passed a jid)
    So do what you have to do to make sure that stays the case
    """
    if passed_jid is None:
        jid = salt.utils.jid.gen_jid(__opts__)
    else:
        jid = passed_jid
    cb_ = _get_connection()
    try:
        # cb.add() fails when the key already exists -- that is the
        # collision check.
        cb_.add(
            str(jid),
            {"nocache": nocache},
            ttl=_get_ttl(),
        )
    except couchbase.exceptions.KeyExistsError:
        # TODO: some sort of sleep or something? Spinning is generally bad practice
        if passed_jid is None:
            # Generated jid collided: recurse to roll a fresh one.
            return prep_jid(nocache=nocache)
    return jid
def returner(load):
    """
    Store one minion's job return in the couchbase bucket.

    The document id is ``<jid>/<minion_id>``; a pre-existing document means a
    duplicate return (possible replay attack), which is logged and rejected.
    """
    cb_ = _get_connection()
    doc_id = "{}/{}".format(load["jid"], load["id"])
    try:
        payload = {
            "return": load["return"],
            "full_ret": salt.utils.json.dumps(load),
        }
        cb_.add(doc_id, payload, ttl=_get_ttl())
    except couchbase.exceptions.KeyExistsError:
        log.error(
            "An extra return was detected from minion %s, please verify "
            "the minion, this could be a replay attack",
            load["id"],
        )
        return False
def save_load(jid, clear_load, minion=None):
    """
    Save the load to the specified jid
    """
    cb_ = _get_connection()
    try:
        jid_doc = cb_.get(str(jid))
    except couchbase.exceptions.NotFoundError:
        # jid document wasn't pre-created (prep_jid not called): make one.
        cb_.add(str(jid), {}, ttl=_get_ttl())
        jid_doc = cb_.get(str(jid))
    jid_doc.value["load"] = clear_load
    # CAS guards against a concurrent writer clobbering the document.
    cb_.replace(str(jid), jid_doc.value, cas=jid_doc.cas, ttl=_get_ttl())
    # if you have a tgt, save that for the UI etc
    if "tgt" in clear_load and clear_load["tgt"] != "":
        ckminions = salt.utils.minions.CkMinions(__opts__)
        # Retrieve the minions list
        _res = ckminions.check_minions(
            clear_load["tgt"], clear_load.get("tgt_type", "glob")
        )
        minions = _res["minions"]
        save_minions(jid, minions)
def save_minions(jid, minions, syndic_id=None):  # pylint: disable=unused-argument
    """
    Save/update the minion list for a given jid. The syndic_id argument is
    included for API compatibility only.
    """
    cb_ = _get_connection()
    try:
        jid_doc = cb_.get(str(jid))
    except couchbase.exceptions.NotFoundError:
        log.warning("Could not write job cache file for jid: %s", jid)
        return False
    # Merge into any existing cached minion list (deduplicated, sorted)
    # so the UI can show which minions were targeted.
    doc = jid_doc.value
    if "minions" in doc:
        doc["minions"] = sorted(set(doc["minions"] + minions))
    else:
        doc["minions"] = minions
    cb_.replace(str(jid), doc, cas=jid_doc.cas, ttl=_get_ttl())
def get_load(jid):
    """Return the stored load (plus its minion list) for the given jid."""
    cb_ = _get_connection()
    try:
        doc = cb_.get(str(jid))
    except couchbase.exceptions.NotFoundError:
        # Unknown jid: mirror salt's convention of an empty load.
        return {}
    ret = {}
    try:
        ret = doc.value["load"]
        ret["Minions"] = doc.value["minions"]
    except KeyError as exc:
        # Keep the partial result: "load" can exist without "minions".
        log.error(exc)
    return ret
def get_jid(jid):
    """
    Return every minion's return document for the given job id, keyed by
    minion id (via the jid_returns view).
    """
    cb_ = _get_connection()
    _verify_views()
    rows = cb_.query(DESIGN_NAME, "jid_returns", key=str(jid), include_docs=True)
    # The view emits (jid, minion_id); the doc body is the minion's return.
    return {row.value: row.doc.value for row in rows}
def get_jids():
    """Return a mapping of every cached job id to its formatted job info."""
    cb_ = _get_connection()
    _verify_views()
    return {
        row.key: _format_jid_instance(row.key, row.doc.value["load"])
        for row in cb_.query(DESIGN_NAME, "jids", include_docs=True)
    }
def _format_job_instance(job):
"""
Return a properly formatted job dict
"""
ret = {
"Function": job.get("fun", "unknown-function"),
"Arguments": list(job.get("arg", [])),
# unlikely but safeguard from invalid returns
"Target": job.get("tgt", "unknown-target"),
"Target-type": job.get("tgt_type", "list"),
"User": job.get("user", "root"),
}
if "metadata" in job:
ret["Metadata"] = job.get("metadata", {})
else:
if "kwargs" in job:
if "metadata" in job["kwargs"]:
ret["Metadata"] = job["kwargs"].get("metadata", {})
return ret
def _format_jid_instance(jid, job):
    """Format a job dict and stamp it with the jid's start time."""
    formatted = _format_job_instance(job)
    formatted["StartTime"] = salt.utils.jid.jid_to_time(jid)
    return formatted
| {
"content_hash": "2990bc245400a12510b56f7077cb6450",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 107,
"avg_line_length": 25.77683615819209,
"alnum_prop": 0.5842191780821918,
"repo_name": "saltstack/salt",
"id": "c39edff790a8ebd10ef07c0332943efdc5011d7c",
"size": "9125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/returners/couchbase_return.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
import uuid
import logging
import time
from heat.engine import resource
from heat.common import exception as HeatException
from avi_api import ApiSession
from avi_api import ObjectNotFound
import avi_utils
# for python 2/3 compatibility: Python 3 has no ``basestring``, so alias
# it to ``str`` there.
try:
    basestring
except NameError:
    # Fix: the bare ``except:`` previously swallowed any error, not just
    # the missing-name case this shim is for.
    basestring = str
try:
    # new way of importing oslo config
    from oslo_config import cfg
except ImportError:
    # old way of importing oslo config
    from oslo.config import cfg
# Register config options so deployers can pin the controller address and
# API timeout; both are read back through cfg.CONF at call time.
controller_opt = cfg.StrOpt("avi_controller", default="",
                            help="Avi Controller IP or FQDN")
cfg.CONF.register_opt(controller_opt)
api_timeout = cfg.IntOpt("avi_api_timeout", default=300,
                         help="Timeout to use for Avi API calls")
cfg.CONF.register_opt(api_timeout)
LOG = logging.getLogger(__name__)
class AviResource(resource.Resource):
    """Base Heat resource backed by an object on an Avi controller.

    Derived classes set ``resource_name`` to the Avi REST resource type
    (e.g. ``virtualservice``); the Heat lifecycle handlers then proxy
    create/update/delete to the controller's REST API.
    """

    resource_name = ""  # should be set by derived resource classes

    def get_version(self):
        """Return the 'avi_version' property, or None when unset."""
        return dict(self.properties).get("avi_version", None)

    def get_project_name(self):
        """Return the keystone project (tenant) name for this request."""
        if not self.context.auth_token_info:
            return self.context.tenant
        if "access" in self.context.auth_token_info:
            # keystone v2 token layout
            return self.context.auth_token_info["access"]["token"][
                "tenant"]["name"]
        # keystone v3 token layout
        return self.context.auth_token_info['token']['project']['name']

    def get_user_name(self):
        """Return the (possibly domain-qualified) user name for this request."""
        if not self.context.auth_token_info:
            # No token info: fall back to the heat domain admin user.
            ksc = self.keystone()
            username = ksc.domain_admin_user
            return username
        if "access" in self.context.auth_token_info:
            user = self.context.auth_token_info["access"]["user"]
        else:
            user = self.context.auth_token_info['token']['user']
        username = user["name"]
        if "domain" in user and user["domain"]["name"] != "Default":
            # Qualify non-default domains so the controller can match them.
            username += "@%s" % user["domain"]["name"]
        return username

    def get_avi_tenant_uuid(self):
        """Map the OpenStack project onto the Avi tenant uuid."""
        if self.get_project_name() == 'admin':
            # 'admin' is special-cased on the controller.
            return "admin"
        return avi_utils.os2avi_uuid("tenant",
                                     self.context.tenant_id)

    def get_avi_address(self):
        """Resolve the controller address from config or the keystone catalog.

        Returns None (after logging) when no address can be determined.
        """
        address = cfg.CONF.avi_controller
        if address:
            return address
        try:
            c = self.client('keystone')
            if not hasattr(c, 'url_for'):
                c = self.client_plugin('keystone')
            if not hasattr(c, 'url_for'):
                # Fix: these fragments were previously passed as separate
                # logging arguments (trailing commas), so everything after
                # the first fragment was dropped from the logged message.
                LOG.error("Couldn't find keystone plugin or client to "
                          "get url_for avi-lbaas service, Avi driver "
                          "will not work!")
                return None
            endpoint = c.url_for(service_type="avi-lbaas",
                                 endpoint_type="publicURL")
            # endpoint looks like scheme://host:port/...; keep host:port only.
            address = endpoint.split("//")[1].split("/")[0]
        except Exception as e:
            LOG.exception("Error during finding avi address: %s", e)
            return None
        return address

    def get_avi_client(self):
        """Build an authenticated ApiSession to the controller, or None."""
        address = self.get_avi_address()
        if not address:
            return None
        username = self.get_user_name()
        password = None
        if not self.context.auth_token:
            # No user token: authenticate as the heat domain admin instead.
            password = self.keystone().domain_admin_password
        api_session = ApiSession(
            controller_ip=address,
            username=username,
            token=self.context.auth_token,
            password=password,
            timeout=cfg.CONF.avi_api_timeout,
        )
        return api_session

    def create_clean_properties(self, inp, field_refs=None, client=None,
                                keyname=None):
        """Recursively drop None values and resolve name->uuid references.

        Strings of the form ``get_avi_uuid_by_name:<name>`` are replaced by
        the uuid of the named object when ``field_refs`` maps ``keyname``
        to an Avi resource type. Returns None when the named object cannot
        be found.
        """
        if isinstance(inp, dict):
            newdict = dict()
            newfrefs = field_refs
            if field_refs and keyname and keyname in field_refs:
                # Descend into the reference map alongside the data.
                newfrefs = field_refs[keyname]
            for k, v in inp.items():
                if v is None:
                    continue
                newdict[k] = self.create_clean_properties(
                    v, field_refs=newfrefs, client=client, keyname=k)
            return newdict
        elif isinstance(inp, list):
            newlist = []
            for entry in inp:
                newlist.append(self.create_clean_properties(
                    entry, field_refs, client, keyname=keyname))
            return newlist
        elif field_refs and isinstance(inp, basestring):
            if keyname and client and inp.startswith("get_avi_uuid_by_name:"):
                objname = inp.split(":", 1)[1]
                resname = field_refs.get(keyname, "").lower()
                if resname:
                    obj = client.get_object_by_name(
                        resname, objname,
                        tenant_uuid=self.get_avi_tenant_uuid()
                    )
                    if not obj:
                        return None
                    return client.get_obj_uuid(obj)
        return inp

    def handle_create(self):
        """POST the cleaned properties to create the Avi object."""
        client = self.get_avi_client()
        res_def = self.create_clean_properties(
            dict(self.properties),
            field_refs=getattr(self, "field_references", {}),
            client=client
        )
        LOG.debug("Resource def for create: %s", res_def)
        # avi_version is routing info for the API call, not object data.
        res_def.pop("avi_version", None)
        try:
            obj = client.post(self.resource_name,
                              api_version=self.get_version(),
                              data=res_def,
                              tenant_uuid=self.get_avi_tenant_uuid()
                              ).json()
        except Exception as e:
            LOG.exception("Error during creation: %s, resname %s, "
                          "resdef %s headers %s",
                          e, self.resource_name, res_def, client.headers)
            raise
        self.resource_id_set(obj['uuid'])
        return True

    def _show_resource(self, client=None):
        """GET the live object from the controller (refs become uuids)."""
        if not client:
            client = self.get_avi_client()
        url = "%s/%s" % (self.resource_name,
                         self.resource_id)
        if self.resource_name in ["virtualservice", "pool"]:
            # These types also expose a runtime sub-resource worth showing.
            url += "?join_subresources=runtime"
        obj = client.get(url,
                         api_version=self.get_version(),
                         tenant_uuid=self.get_avi_tenant_uuid()
                         ).json()
        return avi_utils.replace_refs_with_uuids(obj)

    def _update_obj(self, obj, old_diffs, new_diffs, uniq_keys=None):
        """Merge ``new_diffs`` into ``obj``, using ``old_diffs`` to locate
        which existing list entries were replaced (matched via ``uniq_keys``).

        Returns the mutated ``obj``.
        """
        # Fix: was a mutable default argument (uniq_keys={}).
        if uniq_keys is None:
            uniq_keys = {}
        for p in new_diffs.keys():
            prev_val = old_diffs.get(p, None)
            new_val = new_diffs[p]
            if isinstance(new_val, dict) or isinstance(prev_val, dict):
                if not new_diffs[p]:
                    obj.pop(p, None)
                    continue
                if not obj.get(p, None):
                    obj[p] = self.create_clean_properties(new_diffs[p])
                    continue
                if not prev_val:
                    old_diffs[p] = prev_val = dict()
                for k in new_val.keys():
                    if k not in prev_val:
                        prev_val[k] = None
                self._update_obj(obj[p], prev_val, new_val,
                                 uniq_keys=uniq_keys.get(p, {}))
            elif isinstance(new_val, list) or isinstance(prev_val, list):
                # figure out which entries match from old and remove them
                # from obj;
                # then add objects from new_val
                if prev_val and obj.get(p, None):
                    for pitem in prev_val:
                        pitem = self.create_clean_properties(pitem)
                        newobjs = []
                        found = False
                        for oitem in obj[p]:
                            if found:
                                newobjs.append(oitem)
                            elif avi_utils.cmp_a_in_b(pitem, oitem,
                                                      uniq_keys.get(p, {})):
                                # Drop only the first matching entry.
                                found = True
                            else:
                                newobjs.append(oitem)
                        obj[p] = newobjs
                if new_val:
                    obj[p].extend(self.create_clean_properties(new_val))
                else:
                    obj.pop(p, None)
            else:
                if new_diffs[p] is not None:
                    obj[p] = new_diffs[p]
                else:
                    obj.pop(p, None)
        return obj

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Apply ``prop_diff`` onto the live object and PUT it back."""
        client = self.get_avi_client()
        obj = self._show_resource(client)
        prev_def = self.create_clean_properties(
            dict(self.properties),
            field_refs=getattr(self, "field_references", {}),
            client=client
        )
        if "avi_version" in prev_def:
            obj["avi_version"] = prev_def["avi_version"]
        self._update_obj(obj, prev_def, prop_diff,
                         uniq_keys=getattr(self, "unique_keys", {}))
        res_def = self.create_clean_properties(
            obj,
            field_refs=getattr(self, "field_references", {}),
            client=client
        )
        # we should use version from the updated description
        api_version = res_def.pop("avi_version", None)
        try:
            client.put(
                "%s/%s" % (self.resource_name, self.resource_id),
                api_version=api_version,
                data=res_def,
                tenant_uuid=self.get_avi_tenant_uuid()
            ).json()
        except Exception:
            # Was a bare ``except:``; still logs and re-raises.
            LOG.exception("Update failed: (%s, %s): %s",
                          self.resource_name, self.resource_id, res_def)
            raise
        return True

    def handle_delete(self):
        """DELETE the object; a missing object is logged but not an error."""
        client = self.get_avi_client()
        try:
            client.delete("%s/%s" % (self.resource_name,
                                     self.resource_id),
                          tenant_uuid=self.get_avi_tenant_uuid()
                          ).json()
            if self.resource_name == 'virtualservice':
                # Give neutron time to release the VS ports before heat
                # tears down dependent resources.
                LOG.info('await ports cleanup for VS %s', self.resource_id)
                time.sleep(30)
        except ObjectNotFound as e:
            LOG.exception("Object %s not found: %s", (self.resource_name,
                                                      self.resource_id), e)
        return True
class AviNestedResource(AviResource):
    """Heat resource for a sub-object that lives inside a parent Avi
    resource (patched into one of its list properties) rather than being
    created as a standalone REST object.
    """
    # resource_name refers to the top-level resource that gets patched;
    # a property named resource_name + "_uuid" supplies the parent
    # resource uuid.
    # nested_property_name is the name of the list property in the
    # parent resource that gets patched.
    nested_property_name = ""
    def get_parent_uuid(self):
        """Return the parent object's uuid from this resource's properties."""
        parent_uuid_prop = self.resource_name + "_uuid"
        return self.properties[parent_uuid_prop]
    def handle_create(self):
        """Create by PATCHing this entry into the parent's nested list."""
        client = self.get_avi_client()
        res_def = self.create_clean_properties(
            dict(self.properties),
            field_refs=getattr(self, "field_references", {}),
            client=client
        )
        # The parent uuid and avi_version route the call; strip them from
        # the payload itself.
        parent_uuid_prop = self.resource_name + "_uuid"
        parent_uuid = res_def[parent_uuid_prop]
        res_def.pop(parent_uuid_prop)
        res_def.pop("avi_version", None)
        data = {"update": {self.nested_property_name: [res_def]}}
        try:
            client.patch("%s/%s" % (self.resource_name,
                                    parent_uuid),
                         api_version=self.get_version(),
                         data=data,
                         tenant_uuid=self.get_avi_tenant_uuid()
                         ).json()
        except Exception as e:
            LOG.exception("Error during creation: %s, resname %s/%s, data %s",
                          e, self.resource_name, parent_uuid, data)
            raise
        return True
    def _show_resource(self, client=None):
        """Fetch the parent object (the nested entry lives inside it)."""
        if not client:
            client = self.get_avi_client()
        obj = client.get("%s/%s" % (self.resource_name,
                                    self.get_parent_uuid()),
                         api_version=self.get_version(),
                         tenant_uuid=self.get_avi_tenant_uuid()
                         ).json()
        return obj
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Nested entries are not updated in place: force delete+replace."""
        # force delete and replace
        if not prop_diff:
            return
        if hasattr(HeatException, "UpdateReplace"):
            raise HeatException.UpdateReplace()
        else: # in older versions
            raise resource.UpdateReplace()
    def handle_delete(self):
        """Delete by PATCHing this entry out of the parent's nested list."""
        client = self.get_avi_client()
        res_def = self.create_clean_properties(
            dict(self.properties),
            field_refs=getattr(self, "field_references", {}),
            client=client
        )
        parent_uuid_prop = self.resource_name + "_uuid"
        parent_uuid = res_def[parent_uuid_prop]
        if not parent_uuid:
            # Parent gone means the nested entry is gone with it.
            LOG.info("Parent already deleted!")
            return True
        res_def.pop(parent_uuid_prop)
        res_def.pop("avi_version", None)
        data = {"delete": {self.nested_property_name: [res_def]}}
        try:
            client.patch("%s/%s" % (self.resource_name,
                                    parent_uuid),
                         api_version=self.get_version(),
                         data=data,
                         tenant_uuid=self.get_avi_tenant_uuid()
                         ).json()
        except ObjectNotFound as e:
            # Entry (or parent) already absent: treat delete as successful.
            LOG.exception("Object %s not found: %s", (self.resource_name,
                                                      parent_uuid), e)
        except Exception as e:
            LOG.exception("Error during deletion: %s, resname %s/%s, data %s",
                          e, self.resource_name, parent_uuid, data)
            raise
        return True
| {
"content_hash": "0bf263388faa170be80f33238f39d62f",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 78,
"avg_line_length": 39.10803324099723,
"alnum_prop": 0.5050290409406432,
"repo_name": "avinetworks/avi-heat",
"id": "0caad831c0ee906d91611b7e1f10c5e4a06f04c9",
"size": "14118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avi/heat/avi_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1341205"
},
{
"name": "Shell",
"bytes": "577"
}
],
"symlink_target": ""
} |
import praw
import urllib2
from urlparse import urlparse
from bs4 import BeautifulSoup
from goose import Goose
from passcodes import (REDDIT_USERNAME,
REDDIT_PASSWORD)
# Maps RSS/Atom tag names to the normalized keys used in parsed entries.
# Covers both the Atom vocabulary (entry/updated/summary) and RSS
# (item/pubDate/description).
TAG_LIST = {
    'title':'title',
    'link':'url',
    'id':'srcid',
    'guid':'srcid',
    'published':'published',
    'pubDat':'published',   # kept for backward compatibility (historical typo)
    'pubDate':'published',  # fix: the actual RSS 2.0 publication-date tag
    'updated':'updated',
    'summary':'summary',
    'description':'summary',
    'content':'content',
    }
class ReadBase(object):
    """Fetches a URL and holds its raw HTML plus a BeautifulSoup tree."""
    def __init__(self, url):
        self.url = url
        self.html = self.get_html()
        self.soup = self.set_soup(self.html)
    def get_html(self):
        """Download and return the raw document body for self.url."""
        # NOTE(review): no timeout or error handling; urllib2 errors propagate.
        fstream = urllib2.urlopen(self.url)
        html = fstream.read()
        fstream.close()
        return html
    def set_soup(self, html):
        """Parse the document with BeautifulSoup's default parser."""
        return BeautifulSoup(html)
    def find_domain_url(self):
        """Return (domain_url, title).

        Feedburner feeds proxy another site, so the feed's own <link>
        is used; otherwise the URL's scheme+host is reconstructed.
        """
        urlpath = urlparse(self.url)
        if urlpath.netloc == 'feeds.feedburner.com':
            title = self.soup.find('title')
            domain_url = self.soup.find('link').string
        else:
            title = ''
            domain_url = 'http://%s' % urlpath.netloc
        return domain_url, title
#TODO: This is silly, look into building RSS XML to JSON parser
class ReadRSS(ReadBase):
    """Parses an RSS/Atom feed document into a dict of entry dicts."""
    taglist = TAG_LIST
    def __init__(self, url):
        super(ReadRSS, self).__init__(url)
        self.entry_list = {}
        self.rss_entries = []
    def find_entries(self):
        """Locate entry nodes: Atom <entry> first, then RSS <item>."""
        if self.soup:
            self.rss_entries = self.soup.find_all('entry')
            if not self.rss_entries:
                self.rss_entries = self.soup.find_all('item')
    def parse_entries(self):
        """Parse all feed entries, keyed by their <id>/<guid> string."""
        self.find_entries()
        for entry in self.rss_entries:
            # Fix: reset per entry. Previously an entry with neither <id>
            # nor <guid> raised NameError (first iteration) or silently
            # reused the previous entry's id, overwriting its data.
            rssid = None
            rssids = entry.find_all('id')
            if rssids:
                rssid = rssids[0]
            else:
                rssguids = entry.find_all('guid')
                if rssguids:
                    rssid = rssguids[0]
            if rssid:
                key = str(rssid.string)
                self.entry_list[key] = self.parse_entry(entry)
        return self.entry_list
    def parse_entry(self, entry):
        """Extract the tags listed in ``taglist`` from one entry node."""
        entry_data = {}
        for tag, value in self.taglist.iteritems():
            try:
                entry_tags = entry.find_all(tag)[0]
            except (IndexError, KeyError):
                continue
            if tag == 'link' and entry_tags:
                # Atom links carry the URL in an href attribute.
                try:
                    link_tag = entry_tags.find_all('link')[0]
                    entry_data['url'] = link_tag.get('href')
                except (IndexError, KeyError):
                    pass
            else:
                entry_data[value] = entry_tags.string
        return entry_data
class ReadPage(ReadBase):
    """Fetches a page and extracts its main article via Goose."""
    def __init__(self, **kwargs):
        super(ReadPage, self).__init__(**kwargs)
        self.article = self.get_article_reader()
    def get_article_reader(self):
        """Run Goose article extraction against the page URL."""
        extractor = Goose()
        return extractor.extract(url=self.url)
    def find_content(self):
        """Return a dict with url, title, summary and the article body
        (cleaned text followed by any embedded video codes)."""
        body = '%s\n' % self.article.cleaned_text
        for movie in self.article.movies:
            body += "%s\n" % movie.embed_code
        return dict(url=self.url,
                    title=self.article.title,
                    summary=self.article.meta_description,
                    content=body)
class ReadReddit(object):
    """Logs into reddit via praw and fetches front-page submissions."""
    def __init__(self):
        self.redditor = praw.Reddit(user_agent='my_reddit_parcel')
        self.redditor.login(username=REDDIT_USERNAME, password=REDDIT_PASSWORD)
    def get_front_page(self):
        """Return {'<subreddit_id>|<submission_id>': {title, content}}.

        Fixes: ``fplist`` was a list but assigned by key (TypeError on the
        first submission), and the collected data was never returned.
        """
        fplist = {}
        for submission in self.redditor.get_front_page():
            key = "%s|%s" % (submission.subreddit_id, submission.id)
            # Self posts carry their own HTML body; link posts just a URL.
            selftext = submission.selftext_html
            if selftext:
                content = selftext
            else:
                content = submission.url
            fplist[key] = dict(title=submission.title,
                               content=content)
        return fplist
| {
"content_hash": "2ef65bb4e61eb530d7e8aed866bbbcb5",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 29.4,
"alnum_prop": 0.5384227765180146,
"repo_name": "vivyly/parcellate",
"id": "8c5aea0a7bf3d2abec681b02896e4cc86fcd0ba5",
"size": "3969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parcellate/apps/winparcel/lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "851"
},
{
"name": "JavaScript",
"bytes": "167"
},
{
"name": "Python",
"bytes": "173207"
},
{
"name": "Shell",
"bytes": "5083"
}
],
"symlink_target": ""
} |
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import client
def resolve(url):
    """
    Resolve a Dailymotion page/embed URL to playable stream URL(s).

    Returns a string (HLS manifest URL) for live streams, a list of
    {'quality': 'HD'|'SD', 'url': ...} dicts for regular videos, or
    None on any failure.
    """
    try:
        # Some callers wrap the real URL in an 'urlback' query parameter.
        try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['urlback'][0]
        except: pass
        id = re.compile('/video/([\w]+)').findall(url)[0]
        u = 'http://www.dailymotion.com/sequence/full/%s' % id
        result = client.request(u)
        # Normalize the JSON-ish payload so the regexes below can match.
        result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
        content = re.compile('"content_type":"(.+?)"').findall(result)
        content = '' if len(content) == 0 else content[0]
        if content == 'live':
            # Live streams: rewrite the auto URL to request HLS directly
            # and follow it manually (redirect=0).
            url = re.compile('"autoURL":"(.+?)"').findall(result)[0]
            protocol = urlparse.parse_qs(urlparse.urlparse(url).query)['protocol'][0]
            url = url.replace('protocol=%s' % protocol, 'protocol=hls')
            url += '&redirect=0'
            url = client.request(url)
            return url
        else:
            u = 'http://www.dailymotion.com/embed/video/%s' % id
            result = client.request(u, cookie='ff=off')
            result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
            url = []
            # Probe qualities best-first: 720p (HD) and 480p (SD) together,
            # then fall back to 380p and finally 240p.
            try: url += [{'quality': 'HD', 'url': client.request(re.compile('"720":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            try: url += [{'quality': 'SD', 'url': client.request(re.compile('"480":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            if not url == []: return url
            try: url += [{'quality': 'SD', 'url': client.request(re.compile('"380":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            if not url == []: return url
            try: url += [{'quality': 'SD', 'url': client.request(re.compile('"240":.+?"url":"(.+?)"').findall(result)[0], output='geturl')}]
            except: pass
            if url == []: return
            return url
    except:
        return
| {
"content_hash": "81d79d1136673efb0196cf5b3b60c5ac",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 140,
"avg_line_length": 40.21739130434783,
"alnum_prop": 0.5661261261261261,
"repo_name": "mrknow/filmkodi",
"id": "63b28c0f1ffd89dc59fedfc6bf63342ca219c094",
"size": "2800",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugin.video.fanfilm/resources/lib/resolvers/dailymotion.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
} |
"""Defines several modules with classes for X12 message processing.
We have to disentangle two separate concepts.
- **Message Type**. This is the structure of the message.
The structure is defined by an :class:`X12.parse.Message` object. In addition
to defining the structure, a parser can also unmarshall a block of text.
It can also be used for meta-level processing like emitting DB designs.
- **Message Instance**. This a specific message's data; a collection of
Segments which can be interpreted as a Message with Loops. (The Loops
are not stated, only the Segments.) A message instance can be built
by unmarshalling a block of text, or it can be built "manually" using
object constructors. A message instance can unmarshall itself, returning
a block of text.
The :mod:`X12.parse` package defines the classes which are used to build an X12 message
parser (or "unmarshaller"). The classes are generic, and don't recognize any
specific X12 message structure.
A parser for a given message type can be built from the :mod:`X12.parse` classes manually or
by a conversion tool.
The manual construction involves writing a bunch of object constructors to build parsers
for the Message, the Loops within the Message, the Segments within the Loops,
and the Composites and Elements within the Segments.
A conversion tool usually reads some other meta-data version of the message structure,
and builds an :mod:`X12.parse` structure. There is a conversion tool for :file:`.xml` files. Both of these conversion tools
build the complete :mod:`X12.parse` object, which can either be used to parse messages,
or can be used for other meta-level processing.
The :mod:`X12.map` package supports meta-level processing. This includes subclasses
of :class:`X12.parse.StructureVisitor` that can traverse an :mod:`X12.parse` object to
report on its structure.
This is used by the conversion tools to traverse the definitions
and emit the source code, Django class definitions or raw SQL definitions
for the message that the parser recognizes.
The :mod:`X12.message` package defines a message instance.
``map`` sub-package
====================
.. automodule:: X12.map
``message`` sub-package
========================
.. automodule:: X12.message
``file`` module
====================
.. automodule:: X12.file
``parse`` module
====================
.. automodule:: X12.parse
"""
| {
"content_hash": "24afe48ab565db5bacc7ae82d4fe56d1",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 125,
"avg_line_length": 39.70967741935484,
"alnum_prop": 0.7201462225832657,
"repo_name": "sbuss/TigerShark",
"id": "125c6caf00928f3120ead3bae9e7cc507ac9b0c8",
"size": "2484",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tigershark/X12/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57514"
},
{
"name": "JavaScript",
"bytes": "99474"
},
{
"name": "Perl",
"bytes": "39706"
},
{
"name": "Python",
"bytes": "4119550"
},
{
"name": "Shell",
"bytes": "8866"
}
],
"symlink_target": ""
} |
import os
from admin_scripts.tests import AdminScriptTestCase
from django.apps import apps
from django.core import management
from django.core.management import BaseCommand, CommandError, find_commands
from django.core.management.utils import find_command, popen_wrapper
from django.db import connection
from django.test import SimpleTestCase, override_settings
from django.test.utils import captured_stderr, extend_sys_path
from django.utils import translation
from django.utils._os import upath
from django.utils.six import StringIO
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.contenttypes',
'user_commands',
],
)
class CommandTests(SimpleTestCase):
def test_command(self):
out = StringIO()
management.call_command('dance', stdout=out)
self.assertIn("I don't feel like dancing Rock'n'Roll.\n", out.getvalue())
def test_command_style(self):
out = StringIO()
management.call_command('dance', style='Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
# Passing options as arguments also works (thanks argparse)
management.call_command('dance', '--style', 'Jive', stdout=out)
self.assertIn("I don't feel like dancing Jive.\n", out.getvalue())
def test_language_preserved(self):
out = StringIO()
with translation.override('fr'):
management.call_command('dance', stdout=out)
self.assertEqual(translation.get_language(), 'fr')
def test_explode(self):
""" Test that an unknown command raises CommandError """
with self.assertRaises(CommandError):
management.call_command(('explode',))
def test_system_exit(self):
""" Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line
"""
with self.assertRaises(CommandError):
management.call_command('dance', example="raise")
with captured_stderr() as stderr, self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
self.assertIn("CommandError", stderr.getvalue())
def test_deactivate_locale_set(self):
# Deactivate translation when set to true
out = StringIO()
with translation.override('pl'):
management.call_command('leave_locale_alone_false', stdout=out)
self.assertEqual(out.getvalue(), "")
def test_configured_locale_preserved(self):
# Leaves locale from settings when set to false
out = StringIO()
with translation.override('pl'):
management.call_command('leave_locale_alone_true', stdout=out)
self.assertEqual(out.getvalue(), "pl\n")
def test_find_command_without_PATH(self):
"""
find_command should still work when the PATH environment variable
doesn't exist (#22256).
"""
current_path = os.environ.pop('PATH', None)
try:
self.assertIsNone(find_command('_missing_'))
finally:
if current_path is not None:
os.environ['PATH'] = current_path
def test_discover_commands_in_eggs(self):
"""
Test that management commands can also be loaded from Python eggs.
"""
egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
egg_name = '%s/basic.egg' % egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['commandegg']):
cmds = find_commands(os.path.join(apps.get_app_config('commandegg').path, 'management'))
self.assertEqual(cmds, ['eggcommand'])
def test_call_command_option_parsing(self):
"""
When passing the long option name to call_command, the available option
key is the option dest name (#22985).
"""
out = StringIO()
management.call_command('dance', stdout=out, opt_3=True)
self.assertIn("option3", out.getvalue())
self.assertNotIn("opt_3", out.getvalue())
self.assertNotIn("opt-3", out.getvalue())
def test_call_command_option_parsing_non_string_arg(self):
"""
It should be possible to pass non-string arguments to call_command.
"""
out = StringIO()
management.call_command('dance', 1, verbosity=0, stdout=out)
self.assertIn("You passed 1 as a positional argument.", out.getvalue())
def test_calling_a_command_with_only_empty_parameter_should_ends_gracefully(self):
out = StringIO()
management.call_command('hal', "--empty", stdout=out)
self.assertIn("Dave, I can't do that.\n", out.getvalue())
def test_calling_command_with_app_labels_and_parameters_should_be_ok(self):
out = StringIO()
management.call_command('hal', 'myapp', "--verbosity", "3", stdout=out)
self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", out.getvalue())
def test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok(self):
    """App labels may also follow options on the simulated command line."""
    buf = StringIO()
    management.call_command('hal', "--verbosity", "3", "myapp", stdout=buf)
    self.assertIn("Dave, my mind is going. I can feel it. I can feel it.\n", buf.getvalue())
def test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error(self):
    """Calling 'hal' with neither app labels nor --empty is an error."""
    out = StringIO()
    with self.assertRaises(CommandError):
        management.call_command('hal', stdout=out)
def test_output_transaction(self):
    """Commands with output_transaction wrap their SQL in BEGIN/COMMIT."""
    buf = StringIO()
    management.call_command('transaction', stdout=buf, no_color=True)
    sql = buf.getvalue().strip()
    self.assertTrue(sql.startswith(connection.ops.start_transaction_sql()))
    self.assertTrue(sql.endswith(connection.ops.end_transaction_sql()))
def test_call_command_no_checks(self):
    """
    By default, call_command should not trigger the check framework, unless
    specifically asked.
    """
    self.counter = 0

    # Stub that replaces BaseCommand.check and only counts invocations.
    def patched_check(self_, **kwargs):
        self.counter = self.counter + 1

    # Monkey-patch the class attribute; restored in the finally block so a
    # failure here cannot leak the stub into other tests.
    saved_check = BaseCommand.check
    BaseCommand.check = patched_check
    try:
        # No skip_checks argument: checks must not run.
        management.call_command("dance", verbosity=0)
        self.assertEqual(self.counter, 0)
        # Explicit skip_checks=False: checks must run exactly once.
        management.call_command("dance", verbosity=0, skip_checks=False)
        self.assertEqual(self.counter, 1)
    finally:
        BaseCommand.check = saved_check
class CommandRunTests(AdminScriptTestCase):
    """
    Tests that need to run by simulating the command line, not by call_command.
    """

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_script_prefix_set_in_commands(self):
        """URL reversing inside a command honors FORCE_SCRIPT_NAME."""
        overrides = {
            'ROOT_URLCONF': '"user_commands.urls"',
            'FORCE_SCRIPT_NAME': '"/PREFIX/"',
        }
        self.write_settings('settings.py', apps=['user_commands'], sdict=overrides)
        out, err = self.run_manage(['reverse_url'])
        self.assertNoOutput(err)
        self.assertEqual(out.strip(), '/PREFIX/some/url/')
class UtilsTests(SimpleTestCase):

    def test_no_existent_external_program(self):
        """popen_wrapper() raises CommandError when the executable is missing."""
        with self.assertRaises(CommandError):
            popen_wrapper(['a_42_command_that_doesnt_exist_42'])
| {
"content_hash": "95f4245deacde4382c1c890b06d4db0e",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 104,
"avg_line_length": 40.053763440860216,
"alnum_prop": 0.6409395973154363,
"repo_name": "varunnaganathan/django",
"id": "2c19339ddc18d2a7ecd45a68b49309618a5a6aee",
"size": "7450",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/user_commands/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170527"
},
{
"name": "JavaScript",
"bytes": "256023"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11481044"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Tests for ceviche_challenges.modes."""
from absl.testing import absltest
from ceviche import constants
from ceviche_challenges import defs
from ceviche_challenges import modes
import numpy as np
class ModesTest(absltest.TestCase):
    """Tests for waveguide port definitions and the 1D eigenmode solver."""

    def _check_port_coords(self, port, axis):
        """Assert coords() is pinned to the port position on `axis` and that
        both coordinate arrays span `port.width` cells."""
        np.testing.assert_array_equal(port.coords()[axis], (port.x, port.y)[axis])
        self.assertEqual(port.coords()[0].size, port.width)
        self.assertEqual(port.coords()[1].size, port.width)

    def test_port(self):
        """Test the port definition."""
        # The four direction cases previously duplicated the same block of
        # assertions; X_* directions pin coords()[0] to x, Y_* pin
        # coords()[1] to y.
        for direction, axis in (
            (defs.Direction.X_POS, 0),
            (defs.Direction.X_NEG, 0),
            (defs.Direction.Y_POS, 1),
            (defs.Direction.Y_NEG, 1),
        ):
            port = modes.WaveguidePort(
                x=10,
                y=20,
                width=20,
                order=1,
                dir=direction,
                offset=1,
            )
            self._check_port_coords(port, axis)
        # An odd width is rejected at construction time.
        with self.assertRaises(ValueError):
            modes.WaveguidePort(
                x=10,
                y=20,
                width=21,
                order=1,
                dir=defs.Direction.X_POS,
                offset=1,
            )

    def test_solver(self):
        """Test the eigenmode solver."""
        # Create a silicon waveguide cross section
        n = 150
        width = 20
        omega = 200e12 * 2 * np.pi
        dl = 25e-9
        k0 = omega / constants.C_0
        epsilon_r = np.ones((n,))
        epsilon_r[n // 2 - width // 2:n // 2 + width // 2] = 12.25
        # Solve for modes of different transverse order
        _, _, beta1 = modes.solve_modes(epsilon_r, omega, dl, order=1)
        _, _, beta2 = modes.solve_modes(epsilon_r, omega, dl, order=2)
        _, _, beta3 = modes.solve_modes(epsilon_r, omega, dl, order=3)
        _, _, beta4 = modes.solve_modes(epsilon_r, omega, dl, order=4)
        # Higher order modes should have successively smaller wave vectors.
        self.assertGreater(beta1, beta2)
        self.assertGreater(beta2, beta3)
        self.assertGreater(beta3, beta4)
        # Modes should always be guided, meaning that they are below the light line.
        self.assertGreater(beta1 / k0, 1.)
        self.assertGreater(beta2 / k0, 1.)
        self.assertGreater(beta3 / k0, 1.)
        # For the above waveguide design, the fourth order mode should be above the
        # light line, meaning that beta4 / k0 < 1.0
        self.assertLess(beta4 / k0, 1.)
if __name__ == '__main__':
    # Delegate to absltest's runner when executed as a script.
    absltest.main()
| {
"content_hash": "24974f3e8c8daf4d86ff133f15e59b23",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 80,
"avg_line_length": 27.554545454545455,
"alnum_prop": 0.6093698449356648,
"repo_name": "google/ceviche-challenges",
"id": "e733adde26b1875d7d972c849382918a4dff3190",
"size": "3606",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ceviche_challenges/modes_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "154738"
}
],
"symlink_target": ""
} |
import os
import sys
import methods
def is_active():
    """Whether this platform module is enabled in the build system."""
    return True
def get_name():
    """Human-readable platform name used by the build system."""
    return "Windows"
def can_build():
    """Return True when a Windows build toolchain (MSVC or MinGW) is usable.

    On Windows it prefers MSVC (detected via VSINSTALLDIR, i.e. a Visual
    Studio command prompt) and falls back to probing for MinGW gcc; on
    POSIX it probes the common MinGW cross-compiler prefixes.  Probing is
    done by spawning ``gcc --version`` through os.system.
    """
    if (os.name=="nt"):
        #building natively on windows!
        if (os.getenv("VSINSTALLDIR")):
            return True
        else:
            print("\nMSVC not detected, attempting Mingw.")
            mingw32 = ""
            mingw64 = ""
            if ( os.getenv("MINGW32_PREFIX") ) :
                mingw32 = os.getenv("MINGW32_PREFIX")
            if ( os.getenv("MINGW64_PREFIX") ) :
                mingw64 = os.getenv("MINGW64_PREFIX")
            # NUL is the Windows null device; try bare gcc, then both prefixes.
            test = "gcc --version > NUL 2>&1"
            if os.system(test)!= 0 and os.system(mingw32+test)!=0 and os.system(mingw64+test)!=0 :
                print("- could not detect gcc.")
                print("Please, make sure a path to a Mingw /bin directory is accessible into the environment PATH.\n")
                return False
            else:
                print("- gcc detected.")
                return True
    if (os.name=="posix"):
        # Default cross-compiler prefixes, overridable via MINGW*_PREFIX.
        mingw = "i586-mingw32msvc-"
        mingw64 = "x86_64-w64-mingw32-"
        mingw32 = "i686-w64-mingw32-"
        if (os.getenv("MINGW32_PREFIX")):
            mingw32=os.getenv("MINGW32_PREFIX")
            mingw = mingw32
        if (os.getenv("MINGW64_PREFIX")):
            mingw64=os.getenv("MINGW64_PREFIX")
        # NOTE(review): "&>/dev/null" is bash-specific; under plain sh the
        # redirection may not silence output — confirm intended shells.
        test = "gcc --version &>/dev/null"
        if (os.system(mingw+test) == 0 or os.system(mingw64+test) == 0 or os.system(mingw32+test) == 0):
            return True
    return False
def get_opts():
    """Return the SCons build options exposed by this platform.

    Each entry is (option name, description, default value); defaults for
    the MinGW prefixes are derived from the host OS and MINGW*_PREFIX
    environment variables, mirroring the probing done in can_build().
    """
    mingw=""
    mingw32=""
    mingw64=""
    if ( os.name == "posix" ):
        mingw = "i586-mingw32msvc-"
        mingw32 = "i686-w64-mingw32-"
        mingw64 = "x86_64-w64-mingw32-"
        # Fall back to the old msvc-style prefix if i686-w64 gcc is absent.
        if os.system(mingw32+"gcc --version &>/dev/null") != 0 :
            mingw32 = mingw
    if (os.getenv("MINGW32_PREFIX")):
        mingw32=os.getenv("MINGW32_PREFIX")
        mingw = mingw32
    if (os.getenv("MINGW64_PREFIX")):
        mingw64=os.getenv("MINGW64_PREFIX")
    return [
        ('mingw_prefix','Mingw Prefix',mingw32),
        ('mingw_prefix_64','Mingw Prefix 64 bits',mingw64),
    ]
def get_flags():
    """Default build flags for the Windows platform."""
    flags = [
        ('glew','yes'),
        ('openssl','builtin'), #use builtin openssl
    ]
    return flags
def build_res_file( target, source, env ):
    """SCons action: compile .rc resource files into objects with windres.

    target/source are parallel SCons node lists; env supplies "bits" and the
    matching mingw prefix.  Returns 0 on success, 1 if windres could not be
    run or wrote anything to stderr.
    """
    import subprocess

    # Pick the toolchain prefix that matches the target word size.
    if (env["bits"] == "32"):
        cmdbase = env['mingw_prefix']
    else:
        cmdbase = env['mingw_prefix_64']
    cmdbase = cmdbase + 'windres --include-dir . '

    # Compile each source node into its paired target node.
    for src, tgt in zip(source, target):
        cmd = cmdbase + '-i ' + str(src) + ' -o ' + str(tgt)
        try:
            out = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE).communicate()
            # Any stderr output from windres is treated as failure.
            if len(out[1]):
                return 1
        except Exception:
            # e.g. windres binary missing from PATH.
            return 1
    return 0
def configure(env):
    """Configure the SCons environment for a Windows build.

    Chooses between two toolchains: MSVC when running from a Visual Studio
    command prompt (VSINSTALLDIR set), otherwise MinGW (native or
    cross-compile).  Mutates `env` in place: compiler/linker flags, defines,
    library paths and custom shader/resource builders.
    """
    env.Append(CPPPATH=['#platform/windows'])
    env['is_mingw']=False
    if (os.name=="nt" and os.getenv("VSINSTALLDIR")!=None):
        #build using visual studio
        env['ENV']['TMP'] = os.environ['TMP']
        env.Append(CPPPATH=['#platform/windows/include'])
        env.Append(LIBPATH=['#platform/windows/lib'])
        # Per-target optimization/debug flags.
        if (env["target"]=="release"):
            env.Append(CCFLAGS=['/O2'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
            env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
        elif (env["target"]=="release_debug"):
            env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
        elif (env["target"]=="debug_release"):
            env.Append(CCFLAGS=['/Z7','/Od'])
            env.Append(LINKFLAGS=['/DEBUG'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
            env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
        elif (env["target"]=="debug"):
            env.Append(CCFLAGS=['/Z7','/DDEBUG_ENABLED','/DDEBUG_MEMORY_ENABLED','/DD3D_DEBUG_INFO','/Od'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
            env.Append(LINKFLAGS=['/DEBUG'])
        # Common MSVC flags and engine feature defines.
        env.Append(CCFLAGS=['/MT','/Gd','/GR','/nologo'])
        env.Append(CXXFLAGS=['/TP'])
        env.Append(CPPFLAGS=['/DMSVC', '/GR', ])
        env.Append(CCFLAGS=['/I'+os.getenv("WindowsSdkDir")+"/Include"])
        env.Append(CCFLAGS=['/DWINDOWS_ENABLED'])
        env.Append(CCFLAGS=['/DRTAUDIO_ENABLED'])
        env.Append(CCFLAGS=['/DWIN32'])
        env.Append(CCFLAGS=['/DTYPED_METHOD_BIND'])
        env.Append(CCFLAGS=['/DGLES2_ENABLED'])
        LIBS=['winmm','opengl32','dsound','kernel32','ole32','oleaut32','user32','gdi32', 'IPHLPAPI','Shlwapi', 'wsock32','Ws2_32', 'shell32','advapi32','dinput8','dxguid']
        env.Append(LINKFLAGS=[p+env["LIBSUFFIX"] for p in LIBS])
        env.Append(LIBPATH=[os.getenv("WindowsSdkDir")+"/Lib"])
        # DirectX SDK location; default matches the March 2009 installer.
        if (os.getenv("DXSDK_DIR")):
            DIRECTX_PATH=os.getenv("DXSDK_DIR")
        else:
            DIRECTX_PATH="C:/Program Files/Microsoft DirectX SDK (March 2009)"
        # NOTE(review): VC_PATH is computed but never used below — confirm.
        if (os.getenv("VCINSTALLDIR")):
            VC_PATH=os.getenv("VCINSTALLDIR")
        else:
            VC_PATH=""
        env.Append(CCFLAGS=["/I" + p for p in os.getenv("INCLUDE").split(";")])
        env.Append(LIBPATH=[p for p in os.getenv("LIB").split(";")])
        env.Append(CCFLAGS=["/I"+DIRECTX_PATH+"/Include"])
        env.Append(LIBPATH=[DIRECTX_PATH+"/Lib/x86"])
        env['ENV'] = os.environ;
        # This detection function needs the tools env (that is env['ENV'], not SCons's env), and that is why it's this far bellow in the code
        compiler_version_str = methods.detect_visual_c_compiler_version(env['ENV'])
        # Note: this detection/override code from here onward should be here instead of in SConstruct because it's platform and compiler specific (MSVC/Windows)
        if(env["bits"] != "default"):
            print "Error: bits argument is disabled for MSVC"
            print ("Bits argument is not supported for MSVC compilation. Architecture depends on the Native/Cross Compile Tools Prompt/Developer Console (or Visual Studio settings)"
                +" that is being used to run SCons. As a consequence, bits argument is disabled. Run scons again without bits argument (example: scons p=windows) and SCons will attempt to detect what MSVC compiler"
                +" will be executed and inform you.")
            sys.exit()
        # Forcing bits argument because MSVC does not have a flag to set this through SCons... it's different compilers (cl.exe's) called from the propper command prompt
        # that decide the architecture that is build for. Scons can only detect the os.getenviron (because vsvarsall.bat sets a lot of stuff for cl.exe to work with)
        env["bits"]="32"
        env["x86_opt_vc"]=True
        print "Detected MSVC compiler: "+compiler_version_str
        # If building for 64bit architecture, disable assembly optimisations for 32 bit builds (theora as of writting)... vc compiler for 64bit can not compile _asm
        if(compiler_version_str == "amd64" or compiler_version_str == "x86_amd64"):
            env["bits"]="64"
            env["x86_opt_vc"]=False
            print "Compiled program architecture will be a 64 bit executable (forcing bits=64)."
        elif (compiler_version_str=="x86" or compiler_version_str == "amd64_x86"):
            print "Compiled program architecture will be a 32 bit executable. (forcing bits=32)."
        else:
            print "Failed to detect MSVC compiler architecture version... Defaulting to 32bit executable settings (forcing bits=32). Compilation attempt will continue, but SCons can not detect for what architecture this build is compiled for. You should check your settings/compilation setup."
        if env["bits"]=="64":
            env.Append(CCFLAGS=['/D_WIN64'])
            # Incremental linking fix
            env['BUILDERS']['ProgramOriginal'] = env['BUILDERS']['Program']
            env['BUILDERS']['Program'] = methods.precious_program
    else:
        # Workaround for MinGW. See:
        # http://www.scons.org/wiki/LongCmdLinesOnWin32
        env.use_windows_spawn_fix()
        #build using mingw
        if (os.name=="nt"):
            env['ENV']['TMP'] = os.environ['TMP'] #way to go scons, you can be so stupid sometimes
        else:
            env["PROGSUFFIX"]=env["PROGSUFFIX"]+".exe" # for linux cross-compilation
        mingw_prefix=""
        if (env["bits"]=="default"):
            env["bits"]="32"
        # Static linking and per-word-size toolchain prefix.
        if (env["bits"]=="32"):
            env.Append(LINKFLAGS=['-static'])
            env.Append(LINKFLAGS=['-static-libgcc'])
            env.Append(LINKFLAGS=['-static-libstdc++'])
            mingw_prefix=env["mingw_prefix"];
        else:
            env.Append(LINKFLAGS=['-static'])
            mingw_prefix=env["mingw_prefix_64"];
        nulstr=""
        if (os.name=="posix"):
            nulstr=">/dev/null"
        else:
            nulstr=">nul"
        # if os.system(mingw_prefix+"gcc --version"+nulstr)!=0:
        # #not really super consistent but..
        # print("Can't find Windows compiler: "+mingw_prefix)
        # sys.exit(255)
        if (env["target"]=="release"):
            env.Append(CCFLAGS=['-msse2'])
            if (env["bits"]=="64"):
                env.Append(CCFLAGS=['-O3'])
            else:
                env.Append(CCFLAGS=['-O2'])
            env.Append(LINKFLAGS=['-Wl,--subsystem,windows'])
        elif (env["target"]=="release_debug"):
            env.Append(CCFLAGS=['-O2','-DDEBUG_ENABLED'])
        elif (env["target"]=="debug"):
            env.Append(CCFLAGS=['-g', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
        # Point every tool at the (possibly cross) MinGW toolchain.
        env["CC"]=mingw_prefix+"gcc"
        env['AS']=mingw_prefix+"as"
        env['CXX'] = mingw_prefix+"g++"
        env['AR'] = mingw_prefix+"ar"
        env['RANLIB'] = mingw_prefix+"ranlib"
        env['LD'] = mingw_prefix+"g++"
        env["x86_opt_gcc"]=True
        #env['CC'] = "winegcc"
        #env['CXX'] = "wineg++"
        env.Append(CCFLAGS=['-DWINDOWS_ENABLED','-mwindows'])
        env.Append(CPPFLAGS=['-DRTAUDIO_ENABLED'])
        env.Append(CCFLAGS=['-DGLES2_ENABLED'])
        env.Append(LIBS=['mingw32','opengl32', 'dsound', 'ole32', 'd3d9','winmm','gdi32','iphlpapi','shlwapi','wsock32','ws2_32','kernel32', 'oleaut32', 'dinput8', 'dxguid'])
        # if (env["bits"]=="32"):
        # env.Append(LIBS=['gcc_s'])
        # #--with-arch=i686
        # env.Append(CPPFLAGS=['-march=i686'])
        # env.Append(LINKFLAGS=['-march=i686'])
        #'d3dx9d'
        env.Append(CPPFLAGS=['-DMINGW_ENABLED'])
        #env.Append(LINKFLAGS=['-g'])
        # resrc
        env['is_mingw']=True
        env.Append( BUILDERS = { 'RES' : env.Builder(action = build_res_file, suffix = '.o',src_suffix = '.rc') } )
    # Shader-header builders used by both toolchains.
    env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
    env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
    env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
    env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
| {
"content_hash": "c2a3331f2ce3153f93ce0d217cff9234",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 305,
"avg_line_length": 34.21452145214521,
"alnum_prop": 0.6310408025465419,
"repo_name": "rollenrolm/godot",
"id": "0548b84cfa68a8e33cee026671e0304a335483a5",
"size": "13600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform/windows/detect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "471707"
},
{
"name": "C++",
"bytes": "11879226"
},
{
"name": "Java",
"bytes": "493485"
},
{
"name": "Objective-C",
"bytes": "28194"
},
{
"name": "Objective-C++",
"bytes": "143453"
},
{
"name": "Python",
"bytes": "129866"
},
{
"name": "Shell",
"bytes": "266"
}
],
"symlink_target": ""
} |
import sys
from distutils.core import *
# Detect whether the caller is running under setuptools: if setuptools was
# imported before us, delegate to its setup() and command set, otherwise
# fall back to plain distutils.
if 'setuptools' in sys.modules:
    have_setuptools = True
    from setuptools import setup as old_setup
    # easy_install imports math, it may be picked up from cwd
    from setuptools.command import easy_install
    try:
        # very old versions of setuptools don't have this
        from setuptools.command import bdist_egg
    except ImportError:
        have_setuptools = False
else:
    from distutils.core import setup as old_setup
    have_setuptools = False

import warnings
import distutils.core
import distutils.dist

from numpy.distutils.extension import Extension
from numpy.distutils.numpy_distribution import NumpyDistribution
from numpy.distutils.command import config, config_compiler, \
    build, build_py, build_ext, build_clib, build_src, build_scripts, \
    sdist, install_data, install_headers, install, bdist_rpm, scons, \
    install_clib
from numpy.distutils.misc_util import get_data_files, is_sequence, is_string

# numpy's replacements for the standard distutils commands; merged with any
# user-supplied cmdclass in setup() below.
numpy_cmdclass = {'build': build.build,
                  'build_src': build_src.build_src,
                  'build_scripts': build_scripts.build_scripts,
                  'config_cc': config_compiler.config_cc,
                  'config_fc': config_compiler.config_fc,
                  'config': config.config,
                  'build_ext': build_ext.build_ext,
                  'build_py': build_py.build_py,
                  'build_clib': build_clib.build_clib,
                  'sdist': sdist.sdist,
                  'scons': scons.scons,
                  'install_data': install_data.install_data,
                  'install_headers': install_headers.install_headers,
                  'install_clib': install_clib.install_clib,
                  'install': install.install,
                  'bdist_rpm': bdist_rpm.bdist_rpm,
                  }
if have_setuptools:
    # Use our own versions of develop and egg_info to ensure that build_src is
    # handled appropriately.
    from numpy.distutils.command import develop, egg_info
    numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
    numpy_cmdclass['develop'] = develop.develop
    numpy_cmdclass['easy_install'] = easy_install.easy_install
    numpy_cmdclass['egg_info'] = egg_info.egg_info
def _dict_append(d, **kws):
for k,v in kws.items():
if k not in d:
d[k] = v
continue
dv = d[k]
if isinstance(dv, tuple):
d[k] = dv + tuple(v)
elif isinstance(dv, list):
d[k] = dv + list(v)
elif isinstance(dv, dict):
_dict_append(dv, **v)
elif is_string(dv):
d[k] = dv + v
else:
raise TypeError, repr(type(dv))
def _command_line_ok(_cache=[]):
    """ Return True if command line does not contain any
    help or display requests.
    """
    # Mutable default acts as a deliberate run-once memo.
    if _cache:
        return _cache[0]
    display_opts = ['--'+n for n in Distribution.display_option_names]
    display_opts += ['-'+o[1] for o in Distribution.display_options if o[1]]
    ok = not any(
        arg.startswith('--help') or arg=='-h' or arg in display_opts
        for arg in sys.argv)
    _cache.append(ok)
    return ok
def get_distribution(always=False):
    """Return the Distribution instance currently being set up, if any.

    With ``always=True`` a fresh NumpyDistribution is created when none is
    in progress.  May return None when ``always`` is False.
    """
    dist = distutils.core._setup_distribution
    # XXX Hack to get numpy installable with easy_install.
    # The problem is easy_install runs it's own setup(), which
    # sets up distutils.core._setup_distribution. However,
    # when our setup() runs, that gets overwritten and lost.
    # We can't use isinstance, as the DistributionWithoutHelpCommands
    # class is local to a function in setuptools.command.easy_install
    if dist is not None and \
            'DistributionWithoutHelpCommands' in repr(dist):
        #raise NotImplementedError("setuptools not supported yet for numpy.scons branch")
        dist = None
    if always and dist is None:
        dist = NumpyDistribution()
    return dist
def _exit_interactive_session(_cache=[]):
    """Pause so the interactive session window stays open; runs at most once.

    Registered with atexit by setup(); the mutable default ``_cache`` is an
    intentional run-once guard shared across calls.
    """
    if _cache:
        return # been here
    _cache.append(1)
    # Parenthesized single-argument print is identical on Python 2 and 3,
    # unlike the bare print statement used previously.
    print('-'*72)
    raw_input('Press ENTER to close the interactive session..')
    print('='*72)
def setup(**attr):
    """numpy's drop-in replacement for distutils/setuptools ``setup()``.

    Installs numpy's command classes, optionally runs interactively when no
    command-line arguments were given, evaluates a lazy ``configuration``
    callable only after the command line has been validated, and hoists
    per-extension source libraries into the top-level ``libraries`` list
    before delegating to the original setup().
    """
    if len(sys.argv)<=1 and not attr.get('script_args',[]):
        # No arguments: drop into the interactive prompt, then re-enter
        # setup() with whatever argv the session produced.
        from interactive import interactive_sys_argv
        import atexit
        atexit.register(_exit_interactive_session)
        sys.argv[:] = interactive_sys_argv(sys.argv)
        if len(sys.argv)>1:
            return setup(**attr)

    cmdclass = numpy_cmdclass.copy()

    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass

    if 'configuration' in new_attr:
        # To avoid calling configuration if there are any errors
        # or help request in command in the line.
        configuration = new_attr.pop('configuration')

        # Dry-run setup() up to command-line parsing only, restoring the
        # distutils module-level state afterwards.
        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = "commandline"
        try:
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop

        if dist.help or not _command_line_ok():
            # probably displayed help, skip running any commands
            return dist

        # create setup dictionary and append to new_attr
        config = configuration()
        if hasattr(config,'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)

    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules',[]):
        new_libraries = []
        for item in ext.libraries:
            if is_sequence(item):
                lib_name, build_info = item
                _check_append_ext_library(libraries, item)
                new_libraries.append(lib_name)
            elif is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError("invalid description of extension module "
                                "library %r" % (item,))
        ext.libraries = new_libraries
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)

    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
            and 'headers' not in new_attr:
        new_attr['headers'] = []

    # Use our custom NumpyDistribution class instead of distutils' one
    new_attr['distclass'] = NumpyDistribution

    return old_setup(**new_attr)
def _check_append_library(libraries, item):
    """Append ``item`` to ``libraries`` unless an equivalent entry exists.

    ``item`` is either a plain library name or a (name, build_info) pair.
    A matching name with the *same* build_info object is a silent no-op;
    a matching name with different/missing build_info emits a warning but
    still appends (the numbered warning tags identify which branch fired).
    """
    for libitem in libraries:
        if is_sequence(libitem):
            if is_sequence(item):
                if item[0]==libitem[0]:
                    if item[1] is libitem[1]:
                        # Identical (name, build_info) pair already present.
                        return
                    warnings.warn("[0] libraries list contains %r with"
                                  " different build_info" % (item[0],))
                    break
            else:
                if item==libitem[0]:
                    warnings.warn("[1] libraries list contains %r with"
                                  " no build_info" % (item[0],))
                    break
        else:
            if is_sequence(item):
                if item[0]==libitem:
                    warnings.warn("[2] libraries list contains %r with"
                                  " no build_info" % (item[0],))
                    break
            else:
                if item==libitem:
                    # Same bare name already present.
                    return
    libraries.append(item)
def _check_append_ext_library(libraries, (lib_name,build_info)):
    """Append (lib_name, build_info) to ``libraries`` unless already present.

    Note: the tuple-unpacking parameter is Python-2-only syntax.  Identical
    build_info (by identity) is a silent no-op; a name collision with a
    different or missing build_info warns but still appends.
    """
    for item in libraries:
        if is_sequence(item):
            if item[0]==lib_name:
                if item[1] is build_info:
                    return
                warnings.warn("[3] libraries list contains %r with"
                              " different build_info" % (lib_name,))
                break
        elif item==lib_name:
            warnings.warn("[4] libraries list contains %r with"
                          " no build_info" % (lib_name,))
            break
    libraries.append((lib_name,build_info))
| {
"content_hash": "7a390337b3bdb1141f41cb586c5cc7c3",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 89,
"avg_line_length": 37.71238938053097,
"alnum_prop": 0.5705737416402675,
"repo_name": "illume/numpy3k",
"id": "8481640bdbe2040606fb03908c233359258d8da2",
"size": "8524",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "numpy/distutils/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4499625"
},
{
"name": "C++",
"bytes": "22396"
},
{
"name": "FORTRAN",
"bytes": "8946"
},
{
"name": "Python",
"bytes": "3740754"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python3
# Copyright 2022, gRPC Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import datetime
# Swift source template written to GRPCSwiftCertificate.swift by __main__.
# Literal Swift braces are doubled ({{ }}) because the file is rendered with
# str.format(); single-brace fields ({timestamp}, {ca_cert}, ...) are the
# placeholders filled from the generated certificates.
TEMPLATE = """\
//-----------------------------------------------------------------------------
// THIS FILE WAS GENERATED WITH make-sample-certs.py
//
// DO NOT UPDATE MANUALLY
//-----------------------------------------------------------------------------
#if canImport(NIOSSL)
import struct Foundation.Date
import NIOSSL
/// Wraps `NIOSSLCertificate` to provide the certificate common name and expiry date.
public struct SampleCertificate {{
public var certificate: NIOSSLCertificate
public var commonName: String
public var notAfter: Date
public static let ca = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(caCert.utf8), format: .pem),
commonName: "some-ca",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let otherCA = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(otherCACert.utf8), format: .pem),
commonName: "some-other-ca",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let server = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(serverCert.utf8), format: .pem),
commonName: "localhost",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let exampleServer = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(exampleServerCert.utf8), format: .pem),
commonName: "example.com",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let serverSignedByOtherCA = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(serverSignedByOtherCACert.utf8), format: .pem),
commonName: "localhost",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let client = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(clientCert.utf8), format: .pem),
commonName: "localhost",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let clientSignedByOtherCA = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(clientSignedByOtherCACert.utf8), format: .pem),
commonName: "localhost",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
public static let exampleServerWithExplicitCurve = SampleCertificate(
certificate: try! NIOSSLCertificate(bytes: .init(serverExplicitCurveCert.utf8), format: .pem),
commonName: "localhost",
notAfter: Date(timeIntervalSince1970: {timestamp})
)
}}
extension SampleCertificate {{
/// Returns whether the certificate has expired.
public var isExpired: Bool {{
return self.notAfter < Date()
}}
}}
/// Provides convenience methods to make `NIOSSLPrivateKey`s for corresponding `GRPCSwiftCertificate`s.
public struct SamplePrivateKey {{
private init() {{}}
public static let server = try! NIOSSLPrivateKey(bytes: .init(serverKey.utf8), format: .pem)
public static let exampleServer = try! NIOSSLPrivateKey(
bytes: .init(exampleServerKey.utf8),
format: .pem
)
public static let client = try! NIOSSLPrivateKey(bytes: .init(clientKey.utf8), format: .pem)
public static let exampleServerWithExplicitCurve = try! NIOSSLPrivateKey(
bytes: .init(serverExplicitCurveKey.utf8),
format: .pem
)
}}
// MARK: - Certificates and private keys
private let caCert = \"""
{ca_cert}
\"""
private let otherCACert = \"""
{other_ca_cert}
\"""
private let serverCert = \"""
{server_cert}
\"""
private let serverSignedByOtherCACert = \"""
{server_signed_by_other_ca_cert}
\"""
private let serverKey = \"""
{server_key}
\"""
private let exampleServerCert = \"""
{example_server_cert}
\"""
private let exampleServerKey = \"""
{example_server_key}
\"""
private let clientCert = \"""
{client_cert}
\"""
private let clientSignedByOtherCACert = \"""
{client_signed_by_other_ca_cert}
\"""
private let clientKey = \"""
{client_key}
\"""
private let serverExplicitCurveCert = \"""
{server_explicit_curve_cert}
\"""
private let serverExplicitCurveKey = \"""
{server_explicit_curve_key}
\"""
#endif // canImport(NIOSSL)
"""
def load_file(root, name):
    """Read ``root/name`` and return its contents stripped of surrounding whitespace.

    The files are PEM certificates/keys (ASCII); decode explicitly as UTF-8
    so the result does not depend on the locale's default encoding.
    """
    with open(os.path.join(root, name), encoding="utf-8") as fh:
        return fh.read().strip()
def extract_key(ec_key_and_params):
    """Drop the EC PARAMETERS block from a PEM blob, keeping the private key."""
    kept = []
    keep = True
    for ln in ec_key_and_params.split("\n"):
        # Toggle off at the parameters header, back on at the key header;
        # the headers themselves follow the same keep/drop state.
        if ln == "-----BEGIN EC PARAMETERS-----":
            keep = False
        elif ln == "-----BEGIN EC PRIVATE KEY-----":
            keep = True
        if keep:
            kept.append(ln)
    return "\n".join(kept).strip()
if __name__ == "__main__":
    now = datetime.datetime.now()
    # makecert uses an expiry of 365 days.
    delta = datetime.timedelta(days=365)
    # Seconds since epoch
    # NOTE(review): strftime("%s") is a platform extension (glibc/BSD); it
    # is not portable to Windows — confirm supported hosts.
    not_after = (now + delta).strftime("%s")
    # Expect to be called from the root of the checkout.
    root = os.path.abspath(os.curdir)
    executable = os.path.join(root, "scripts", "makecert")
    try:
        # Regenerates the .crt/.key files in the working directory.
        subprocess.check_call(executable)
    except FileNotFoundError:
        print("Please run the script from the root of the repository")
        exit(1)
    # Placeholder values for TEMPLATE.format() below.
    kwargs = {
        "year": now.year,
        "timestamp": not_after,
        "ca_cert": load_file(root, "ca.crt"),
        "other_ca_cert": load_file(root, "other-ca.crt"),
        "server_cert": load_file(root, "server-localhost.crt"),
        "server_signed_by_other_ca_cert": load_file(root, "server-localhost-other-ca.crt"),
        "server_key": load_file(root, "server-localhost.key"),
        "example_server_cert": load_file(root, "server-example.com.crt"),
        "example_server_key": load_file(root, "server-example.com.key"),
        "client_cert": load_file(root, "client.crt"),
        "client_signed_by_other_ca_cert": load_file(root, "client-other-ca.crt"),
        "client_key": load_file(root, "client.key"),
        "server_explicit_curve_cert": load_file(root, "server-explicit-ec.crt"),
        "server_explicit_curve_key": extract_key(load_file(root,
                                                           "server-explicit-ec.key"))
    }
    formatted = TEMPLATE.format(**kwargs)
    with open("Sources/GRPCSampleData/GRPCSwiftCertificate.swift", "w") as fh:
        fh.write(formatted)
| {
"content_hash": "061013529b2bd10046e411851ca8e273",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 103,
"avg_line_length": 31.14611872146119,
"alnum_prop": 0.6736548893124176,
"repo_name": "grpc/grpc-swift",
"id": "cc387515cc3e1fa4a508244965c564db0fca9ab5",
"size": "7440",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/make-sample-certs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2922"
},
{
"name": "Makefile",
"bytes": "4557"
},
{
"name": "Python",
"bytes": "17581"
},
{
"name": "Ruby",
"bytes": "888"
},
{
"name": "Shell",
"bytes": "32098"
},
{
"name": "Swift",
"bytes": "2626269"
}
],
"symlink_target": ""
} |
"""
Test that the breakpoint auto-continue flag works correctly.
"""
from __future__ import print_function
import os
import time
import re
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class BreakpointAutoContinue(TestBase):
    """Exercise the breakpoint auto-continue flag at breakpoint and location level."""

    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True

    def test_breakpoint_auto_continue(self):
        """Make sure the auto continue continues with no other complications"""
        self.build()
        self.simple_auto_continue()

    def test_auto_continue_with_command(self):
        """Add a command, make sure the command gets run"""
        self.build()
        self.auto_continue_with_command()

    def test_auto_continue_on_location(self):
        """Set auto-continue on a location and make sure only that location continues"""
        self.build()
        self.auto_continue_location()

    def make_target_and_bkpt(self, additional_options=None, num_expected_loc=1,
                             pattern="Set a breakpoint here"):
        """Create self.target for a.out and set a source-regexp breakpoint with
        --auto-continue 1 (plus any extra option text).  Returns the breakpoint id."""
        exe = self.getBuildArtifact("a.out")
        self.target = self.dbg.CreateTarget(exe)
        self.assertTrue(self.target.IsValid(), "Target is not valid")
        extra_options_txt = "--auto-continue 1 "
        if additional_options:
            extra_options_txt += additional_options
        bpno = lldbutil.run_break_set_by_source_regexp(self, pattern,
                                                       extra_options = extra_options_txt,
                                                       num_expected_locations = num_expected_loc)
        return bpno

    def launch_it (self, expected_state):
        """Launch the target and assert the process reaches expected_state."""
        error = lldb.SBError()
        launch_info = lldb.SBLaunchInfo(None)
        launch_info.SetWorkingDirectory(self.get_process_working_directory())
        process = self.target.Launch(launch_info, error)
        self.assertTrue(error.Success(), "Launch failed.")
        state = process.GetState()
        self.assertEqual(state, expected_state, "Didn't get expected state")
        return process

    def setUp(self):
        # Call super's setUp().
        TestBase.setUp(self)

    def simple_auto_continue(self):
        # Auto-continue breakpoint: process runs to exit but still counts hits.
        bpno = self.make_target_and_bkpt()
        process = self.launch_it(lldb.eStateExited)
        bkpt = self.target.FindBreakpointByID(bpno)
        self.assertEqual(bkpt.GetHitCount(), 2, "Should have run through the breakpoint twice")

    def auto_continue_with_command(self):
        # The breakpoint command flips auto-continue off on first hit, so the
        # second hit actually stops the process.
        bpno = self.make_target_and_bkpt("-N BKPT -C 'break modify --auto-continue 0 BKPT'")
        process = self.launch_it(lldb.eStateStopped)
        state = process.GetState()
        self.assertEqual(state, lldb.eStateStopped, "Process should be stopped")
        bkpt = self.target.FindBreakpointByID(bpno)
        threads = lldbutil.get_threads_stopped_at_breakpoint(process, bkpt)
        self.assertEqual(len(threads), 1, "There was a thread stopped at our breakpoint")
        self.assertEqual(bkpt.GetHitCount(), 2, "Should have hit the breakpoint twice")

    def auto_continue_location(self):
        # Two locations match; only the one in main is set to auto-continue,
        # so the stop should happen at the other location (in call_me).
        bpno = self.make_target_and_bkpt(pattern="Set a[^ ]* breakpoint here", num_expected_loc=2)
        bkpt = self.target.FindBreakpointByID(bpno)
        bkpt.SetAutoContinue(False)
        loc = lldb.SBBreakpointLocation()
        for i in range(0,2):
            func_name = bkpt.location[i].GetAddress().function.name
            if func_name == "main":
                loc = bkpt.location[i]
        self.assertTrue(loc.IsValid(), "Didn't find a location in main")
        loc.SetAutoContinue(True)
        process = self.launch_it(lldb.eStateStopped)
        threads = lldbutil.get_threads_stopped_at_breakpoint(process, bkpt)
        self.assertEqual(len(threads), 1, "Didn't get one thread stopped at our breakpoint")
        func_name = threads[0].frame[0].function.name
        self.assertEqual(func_name, "call_me")
| {
"content_hash": "7501db64d11bbe89e7c12ac167c5511e",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 98,
"avg_line_length": 37.54807692307692,
"alnum_prop": 0.6448143405889885,
"repo_name": "youtube/cobalt",
"id": "b5e38eec579371759262a7d02967d097d5de3e41",
"size": "3905",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/auto_continue/TestBreakpointAutoContinue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import binascii
import json
import os
import sys
import time

import dominate
from dominate.tags import *
import pandas as pd
# Profile. Profile has data in it that is then used in sign system
#
#
# In[2]:
# Field names expected in a profile record.
# NOTE(review): 'usercom' and 'htmn' don't match the variables actually
# used below (usecom / hrmn) -- confirm which spelling is intended.
valname = ('lasnam', 'signin', 'usercom', 'dayr', 'htmn')
# In[2]:
# In[3]:
# Echo the field names (Python 2 print statement; notebook-style cell).
for itzval in valname:
    print itzval
# This function creates a dict, another updates it.
# Function is working to create it but update is coming back with error.
#
# How do I refer to the created dict as the one to update?
# In[4]:
class DictNows():
    """Create a dict and remember it so it can be updated later.

    Fixes the original bug where ``updatedict`` called
    ``dictcreate.update(...)`` -- an attribute lookup on the *function* --
    instead of updating the dict that ``dictcreate`` produced (the methods
    were also missing ``self``, so they could never be called as written).
    """
    def dictcreate(self, keyval, firnam):
        """Create (and remember) a new dict mapping keyval -> firnam."""
        self.created = {keyval: firnam}
        return self.created
    def updatedict(self, keyvalz, othnam):
        """Add keyvalz -> othnam to the dict made by dictcreate."""
        self.created.update({keyvalz: othnam})
        return self.created
# In[5]:
#checkdict = dictcreate('check', 'this')
# In[6]:
#checkdict
# In[7]:
def dictcreate(keyval, firnam):
    """Return a one-entry dict mapping keyval to firnam."""
    return {keyval: firnam}
#def updatedict(keyvalz, othnam):
# return checkdict.update({keyvalz: othnam})
def returndate():
    """Current date formatted like 01-Jan-2015 (dd-Mon-YYYY)."""
    return time.strftime("%d-%b-%Y")
def returntime():
    """Current wall-clock time as HH:MM:SS."""
    stamp = time.strftime("%H:%M:%S")
    return stamp
def returan():
    """Return 16 cryptographically random bytes."""
    rand_bytes = os.urandom(16)
    return rand_bytes
#def blahblah():
#open('/home/wcmckee/visignsys/posts/' + ixtwe + '.html', 'w')
#savpos.write(str(doc))
#savpos.close()
# In[7]:
# In[8]:
#updatedict('omg', 'not again')
# In[9]:
# Notebook-style smoke tests of the helpers; results are discarded when run
# as a plain script.
returan()
# In[10]:
returntime()
# In[11]:
#DictNows.dictcreate('check')
# In[12]:
dictcreate('name', 'wcm')
#updatedict()
# In[13]:
#updatedict('checking', 'this works')
# In[15]:
# Ask whether to build a new profile interactively; otherwise use canned
# defaults. BUG FIX: the original ended with "elif 'n' in newprof", so any
# answer containing neither 'y' nor 'n' left every profile variable
# undefined and crashed with NameError further down the script.
newprof = raw_input('New Profile y/n: ')
if 'y' in newprof:
    lasnam = raw_input('Last Name: ')
    firnam = raw_input('First Name: ')
    dopz = raw_input('dob: ')
    mname = ('William Mckee')
    ename = raw_input('Email: ')
    signin = raw_input('Reason: ')
    usecom = raw_input('Comments: ')
else:
    # Default profile; also used for unrecognized answers.
    lasnam = ("mckee")
    firnam = ('First Name: ')
    dopz = ('dob: ')
    mname = ('William Mckee')
    ename = ('Email: ')
    signin = ('Reason: ')
    usecom = ('Comments: ')
# In[16]:
#bitdict =
# In[17]:
# Assemble the sign-in record from the profile answers plus timestamps.
betdict = dict()
# In[18]:
#betdict.update({'lastname': lasnam})
# In[19]:
# NOTE(review): dayr/hrmn are computed here but the record below uses the
# returndate()/returntime() helpers instead -- these two may be dead.
dayr = time.strftime("%d" + "-" + "%b" + "-" + "%Y")
hrmn = time.strftime("%H:%M:%S")
# <codecell>
betdict.update({'last-name': lasnam})
betdict.update({'reason': signin})
betdict.update({'signin-comment': usecom})
betdict.update({'signin-date': returndate()})
betdict.update({'signin-hrmin': returntime()})
# In[20]:
betdict
# In[21]:
# Serialize the record once; reused for every archive/append write below.
betjsn = json.dumps(betdict)
betjsn
# In[22]:
#for itz in updatedict():
# print itz
# In[23]:
# Read back the existing sign-in index as raw text.
# NOTE(review): opind is never closed -- the file handle leaks.
opind = open('/home/wcmckee/visignsys/index.json', 'r')
opred = opind.read()
# In[24]:
opred
# In[25]:
# NOTE(review): dumps() on already-serialized text double-encodes it;
# json.loads() was probably intended (see the commented json.load below).
opjsnd = json.dumps(opred)
# In[26]:
str(opjsnd)
# In[26]:
# In[27]:
#json.load(opred)
# In[27]:
# In[28]:
# <nbformat>3.0</nbformat>
# <markdowncell>
# <h2>visitor sign system</h2>
#
# This is a python script used to sign in and signout, keeping track of hours and creating a more automative system.
#
# Make sign in and out faster, easier to keep track of.
#
# Never forget.
#
# Auto roll check.
#
# Two random hex codes for security and correct checking. Made use of these by using one as file name when saving.
#
# Creates xls file with data, also uses sqlalchemy for databases, web server, html page:
# input (or auto) name, reason, auto day/month/year hr/min - of signin.
#
# when launched asked if you want to signin or signout.
#
# how i want this to run for william:
#
# william arrives into whai. On his phone he runs the signin script. On signing out for the day the script is run onto final part, signout. asks for comment first then records time, and date.
#
# comment system. leave comment for staff, parent, tag staff, area, story, parent, child.
#
# signout - enter code of session you want to signout.
#
# Screw the excel file, I'm just dealing with the index page. I am saving an archive in the posts folder under a urandom 13 character code.
# <codecell>
# Render the HTML sign-in sheet with dominate.
doc = dominate.document(title='Visitor Sign Sheet')
with doc.head:
    link(rel='stylesheet', href='style.css')
    script(type='text/javascript', src='script.js')
with doc:
    with div(id='header').add(ol()):
        h1('Visitor Sign Sheet')
        # One list item per value in the current sign-in record.
        for i in betdict.values():
            li(a(i))
    with div():
        attr(cls='body')
        # Dump the previous raw index contents, then footer text.
        p(opred)
        p('last updated: ' + time.strftime("%H:%M"))
        p('Visitor Sign Sheet is open source')
        a('http://github.com/wcmckee/wcmckee', href='https://github.com/wcmckee/wcmckee')
#print doc
# <codecell>
# Overwrite the live index page with the freshly rendered document.
savindex = open('/home/wcmckee/visignsys/index.html', 'w')
# <codecell>
savindex.write(str(doc))
savindex.close()
# <codecell>
# Random id used as the archive filename.
# BUG FIX: os.urandom() returns raw bytes which can contain '/' or NUL and
# therefore can't safely be embedded in a file path; hex-encode them so the
# id is filesystem-safe (32 hex characters).
ixran = os.urandom(16)
ixtwe = binascii.hexlify(ixran)
# <codecell>
# Archive the rendered page and its JSON record under the random id, then
# append the record to the running index file.
savpos = open('/home/wcmckee/visignsys/posts/' + ixtwe + '.html', 'w')
savpos.write(str(doc))
savpos.close()
# <codecell>
savpos = open('/home/wcmckee/visignsys/posts/' + ixtwe + '.json', 'w')
savpos.write(str(betjsn))
savpos.close()
# <codecell>
#savpos = open('/home/wcmckee/visignsys/index.meta', 'w')
#savpos.write(str(wsdict.keys()))
#savpos.close()
# <codecell>
# NOTE(review): appending raw JSON objects back-to-back makes index.json
# itself invalid JSON; a JSON-lines format or read-modify-write may be meant.
savpos = open('/home/wcmckee/visignsys/index.json', 'a')
savpos.write(str(betjsn))
savpos.close()
print ('sign in complete')
# In[29]:
# Wrap the raw index text in a pandas Series and collect its elements.
reser = pd.Series(opred)
# In[30]:
#pd.DataFrame(reser)
# In[31]:
rezda = []
# In[32]:
# Echo and accumulate each element (Python 2 print statement).
for res in reser:
    print res
    rezda.append(res)
# In[33]:
rezda
# In[34]:
len(rezda)
# In[34]:
# In[32]:
# In[ ]:
| {
"content_hash": "a35eaf59d2589bf912be191e1c9fc459",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 192,
"avg_line_length": 15.178571428571429,
"alnum_prop": 0.6264253393665158,
"repo_name": "wcmckee/wcmckee",
"id": "c1b4378b9e667ca52cc4dc18b515f74594619335",
"size": "43510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5995765"
},
{
"name": "Python",
"bytes": "152966"
}
],
"symlink_target": ""
} |
with open('input', 'r') as f:
data = f.read()
cur_floor = 0
floor_map = {}
for pos, move in enumerate(data):
if move == '(':
cur_floor = cur_floor + 1
if move == ')':
cur_floor = cur_floor - 1
floor_map[pos] = cur_floor
first_underground_pos = -1
for pos, floor in floor_map.iteritems():
if floor < 0:
first_underground_pos = pos
break
if first_underground_pos < 0:
print 'Never goes underground'
# Because enumerate() is zero-based, but character positions in this puzzle are
# one-based
print 'First underground floor at position {}'.format(first_underground_pos + 1)
| {
"content_hash": "2646057a1616ebc3781313e3a33ab9d5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 24.307692307692307,
"alnum_prop": 0.629746835443038,
"repo_name": "jatowler/adventofcode-2015",
"id": "6dcfa09b949b82f2029cd78c71aeffafdc087e0e",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1/part2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57101"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
def prerelease_local_scheme(version):
    """Return local scheme version unless building on master in CircleCI.

    This function returns the local scheme version number
    (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
    pre-release in which case it ignores the hash and produces a
    PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
    """
    from setuptools_scm.version import get_local_node_and_date

    # On the CI master branch, drop the local component entirely.
    if os.getenv('CIRCLE_BRANCH') != 'master':
        return get_local_node_and_date(version)
    return ''
# Long description comes straight from the README.
with open('README.rst') as f:
    readme = f.read()
# Core runtime dependencies.
installReqs = [
    'boto3',
    'botocore',
    'CherryPy',
    'click',
    'click-plugins',
    'dogpile.cache',
    'filelock',
    'jsonschema',
    'Mako',
    'passlib [bcrypt,totp]',
    'pymongo>=3.6,<4',
    'PyYAML',
    'psutil',
    'pyOpenSSL',
    'python-dateutil',
    'pytz',
    'requests',
]
# Optional feature sets, installable as girder[sftp] / girder[mount].
extrasReqs = {
    'sftp': [
        'paramiko'
    ],
    'mount': [
        'fusepy>=3.0'
    ]
}
setup(
    name='girder',
    # Version is derived from git via setuptools-scm; see
    # prerelease_local_scheme above for the CI master special case.
    use_scm_version={'local_scheme': prerelease_local_scheme},
    setup_requires=[
        'setuptools-scm<7 ; python_version < "3.7"',
        'setuptools-scm ; python_version >= "3.7"',
    ],
    description='Web-based data management platform',
    long_description=readme,
    author='Kitware, Inc.',
    author_email='kitware@kitware.com',
    url='https://girder.readthedocs.org',
    license='Apache 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    packages=find_packages(
        exclude=('girder.test', 'tests.*', 'tests', '*.plugin_tests.*', '*.plugin_tests')
    ),
    include_package_data=True,
    python_requires='>=3.6',
    install_requires=installReqs,
    extras_require=extrasReqs,
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'girder-server = girder.cli.serve:main',
            'girder-sftpd = girder.cli.sftpd:main',
            'girder-shell = girder.cli.shell:main',
            'girder = girder.cli:main'
        ],
        'girder.cli_plugins': [
            'serve = girder.cli.serve:main',
            'mount = girder.cli.mount:main',
            'shell = girder.cli.shell:main',
            'sftpd = girder.cli.sftpd:main',
            'build = girder.cli.build:main'
        ]
    }
)
| {
"content_hash": "e4e5e852e52127d4b533d4194617c888",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 89,
"avg_line_length": 27.97,
"alnum_prop": 0.585269932070075,
"repo_name": "manthey/girder",
"id": "4daf1f9e7b571d7193f4e8fb32e579eb75220eed",
"size": "2821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "26244"
},
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "1528"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "JavaScript",
"bytes": "1175790"
},
{
"name": "Jinja",
"bytes": "322"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Pug",
"bytes": "137980"
},
{
"name": "Python",
"bytes": "2016093"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "3354"
},
{
"name": "Stylus",
"bytes": "48706"
}
],
"symlink_target": ""
} |
from django.db import models
class Tag(models.Model):
    # A free-form label that Content objects can reference.
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class Content(models.Model):
    """A piece of content: a described image with optional tags."""
    description = models.CharField(max_length=100)
    image = models.ImageField(upload_to='images')
    # FIX: null=True has no effect on ManyToManyField (Django system check
    # fields.W340) -- an empty relation is simply no rows in the join table,
    # so only blank=True is meaningful here.
    tags = models.ManyToManyField('Tag', blank=True)
    # Other relevant fields here ...
    def __str__(self):
        return self.description
| {
"content_hash": "ff9e78a60e099a4135c27ffa82b14605",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 25.941176470588236,
"alnum_prop": 0.6621315192743764,
"repo_name": "imiric/test-widget",
"id": "f30a207176d71654b32d380559dea2673bc3c9de",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "widget/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "379"
},
{
"name": "Python",
"bytes": "8492"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
} |
import config
from metodos import *
from mensagens import *
adms = config.adms
def ban(msg):
    # Ban the user whose message was replied to. Only admins (config.adms)
    # may use the command, and /ban must be sent as a reply.
    # NOTE(review): `ban['banido']` below indexes a name also bound by this
    # very `def ban` -- if the message dict `ban` comes from
    # `from mensagens import *` at the top of the file, this def shadows it
    # and the lookup will fail at runtime; confirm against mensagens.
    if msg['text'] == '/ban':
        from_id = msg['from']['id']
        chat_id = msg['chat']['id']
        if from_id in adms:
            try:
                # The target is the author of the replied-to message.
                reply_id = msg['reply_to_message']['from']['id']
                reply = True
            except:
                reply = False
            if reply == True:
                if 'error_code' not in kickChatMember(chat_id, reply_id):
                    sendMessage(chat_id, ban['banido'].format(msg['from']['first_name']))
                else:
                    sendMessage(chat_id,ban['404'])
            else:
                sendMessage(chat_id, erros['reply'])
        else:
            sendMessage(chat_id,erros['admin'])
def unban(msg):
    """Lift a ban on the replied-to user. Admin-only; /desban must be a reply."""
    if msg['text'] != '/desban':
        return
    sender_id = msg['from']['id']
    chat_id = msg['chat']['id']
    if sender_id not in adms:
        sendMessage(chat_id, erros['admin'])
        return
    try:
        target_id = msg['reply_to_message']['from']['id']
    except:
        sendMessage(chat_id, erros['reply'])
        return
    if 'error_code' not in unbanChatMember(chat_id, target_id):
        sendMessage(chat_id, ban['desbanido'].format(msg['from']['first_name']))
    else:
        sendMessage(chat_id, ban['404'])
def kick(msg):
    """Kick (ban then immediately unban) the replied-to user so they can
    rejoin. Admin-only (config.adms); /kick must be sent as a reply."""
    if msg['text'] == '/kick':
        from_id = msg['from']['id']
        chat_id = msg['chat']['id']
        if from_id in adms:
            try:
                reply_id = msg['reply_to_message']['from']['id']
                reply = True
            except (KeyError, TypeError):
                # FIX: narrowed from a bare except, which silently swallowed
                # every error; only "no/malformed reply_to_message" belongs here.
                reply = False
            if reply == True:
                # `and` short-circuits: the unban only runs if the kick
                # succeeded, otherwise we report ban['404'].
                if 'error_code' not in kickChatMember(chat_id, reply_id) and 'error_code' not in unbanChatMember(chat_id, reply_id):
                    sendMessage(chat_id, ban['kickado'].format(msg['from']['first_name']))
                else:
                    sendMessage(chat_id,ban['404'])
            else:
                sendMessage(chat_id, erros['reply'])
        else:
            sendMessage(chat_id,erros['admin'])
def banhammer(msg):
    """Run every moderation command handler over the incoming message."""
    for handler in (ban, kick, unban):
        handler(msg)
"content_hash": "077a8642406a1536aa0d2b1cd61b7a67",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 134,
"avg_line_length": 29,
"alnum_prop": 0.4670072371221797,
"repo_name": "francis-taylor/Timotty-Master",
"id": "9cba1505fccc77c1a17f6854e8ee5d401fbab785",
"size": "2373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cybot/plug/banhammer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138728"
}
],
"symlink_target": ""
} |
from ._models_py3 import ApiError
from ._models_py3 import ApiErrorBase
from ._models_py3 import CommunityGallery
from ._models_py3 import CommunityGalleryImage
from ._models_py3 import CommunityGalleryImageList
from ._models_py3 import CommunityGalleryImageVersion
from ._models_py3 import CommunityGalleryImageVersionList
from ._models_py3 import CommunityGalleryInfo
from ._models_py3 import DataDiskImageEncryption
from ._models_py3 import Disallowed
from ._models_py3 import DiskImageEncryption
from ._models_py3 import EncryptionImages
from ._models_py3 import Gallery
from ._models_py3 import GalleryApplication
from ._models_py3 import GalleryApplicationCustomAction
from ._models_py3 import GalleryApplicationCustomActionParameter
from ._models_py3 import GalleryApplicationList
from ._models_py3 import GalleryApplicationUpdate
from ._models_py3 import GalleryApplicationVersion
from ._models_py3 import GalleryApplicationVersionList
from ._models_py3 import GalleryApplicationVersionPublishingProfile
from ._models_py3 import GalleryApplicationVersionSafetyProfile
from ._models_py3 import GalleryApplicationVersionUpdate
from ._models_py3 import GalleryArtifactPublishingProfileBase
from ._models_py3 import GalleryArtifactSafetyProfileBase
from ._models_py3 import GalleryArtifactSource
from ._models_py3 import GalleryArtifactVersionFullSource
from ._models_py3 import GalleryArtifactVersionSource
from ._models_py3 import GalleryDataDiskImage
from ._models_py3 import GalleryDiskImage
from ._models_py3 import GalleryDiskImageSource
from ._models_py3 import GalleryExtendedLocation
from ._models_py3 import GalleryIdentifier
from ._models_py3 import GalleryImage
from ._models_py3 import GalleryImageFeature
from ._models_py3 import GalleryImageIdentifier
from ._models_py3 import GalleryImageList
from ._models_py3 import GalleryImageUpdate
from ._models_py3 import GalleryImageVersion
from ._models_py3 import GalleryImageVersionList
from ._models_py3 import GalleryImageVersionPublishingProfile
from ._models_py3 import GalleryImageVersionSafetyProfile
from ._models_py3 import GalleryImageVersionStorageProfile
from ._models_py3 import GalleryImageVersionUpdate
from ._models_py3 import GalleryList
from ._models_py3 import GalleryOSDiskImage
from ._models_py3 import GalleryTargetExtendedLocation
from ._models_py3 import GalleryUpdate
from ._models_py3 import ImagePurchasePlan
from ._models_py3 import InnerError
from ._models_py3 import LatestGalleryImageVersion
from ._models_py3 import ManagedArtifact
from ._models_py3 import OSDiskImageEncryption
from ._models_py3 import OSDiskImageSecurityProfile
from ._models_py3 import PirCommunityGalleryResource
from ._models_py3 import PirResource
from ._models_py3 import PirSharedGalleryResource
from ._models_py3 import PolicyViolation
from ._models_py3 import RecommendedMachineConfiguration
from ._models_py3 import RegionalReplicationStatus
from ._models_py3 import RegionalSharingStatus
from ._models_py3 import ReplicationStatus
from ._models_py3 import Resource
from ._models_py3 import ResourceRange
from ._models_py3 import SharedGallery
from ._models_py3 import SharedGalleryDataDiskImage
from ._models_py3 import SharedGalleryDiskImage
from ._models_py3 import SharedGalleryImage
from ._models_py3 import SharedGalleryImageList
from ._models_py3 import SharedGalleryImageVersion
from ._models_py3 import SharedGalleryImageVersionList
from ._models_py3 import SharedGalleryImageVersionStorageProfile
from ._models_py3 import SharedGalleryList
from ._models_py3 import SharedGalleryOSDiskImage
from ._models_py3 import SharingProfile
from ._models_py3 import SharingProfileGroup
from ._models_py3 import SharingStatus
from ._models_py3 import SharingUpdate
from ._models_py3 import SoftDeletePolicy
from ._models_py3 import TargetRegion
from ._models_py3 import UpdateResourceDefinition
from ._models_py3 import UserArtifactManage
from ._models_py3 import UserArtifactSettings
from ._models_py3 import UserArtifactSource
from ._compute_management_client_enums import AggregatedReplicationState
from ._compute_management_client_enums import Architecture
from ._compute_management_client_enums import ConfidentialVMEncryptionType
from ._compute_management_client_enums import GalleryApplicationCustomActionParameterType
from ._compute_management_client_enums import GalleryExpandParams
from ._compute_management_client_enums import GalleryExtendedLocationType
from ._compute_management_client_enums import GalleryProvisioningState
from ._compute_management_client_enums import GallerySharingPermissionTypes
from ._compute_management_client_enums import HostCaching
from ._compute_management_client_enums import HyperVGeneration
from ._compute_management_client_enums import OperatingSystemStateTypes
from ._compute_management_client_enums import OperatingSystemTypes
from ._compute_management_client_enums import PolicyViolationCategory
from ._compute_management_client_enums import ReplicationMode
from ._compute_management_client_enums import ReplicationState
from ._compute_management_client_enums import ReplicationStatusTypes
from ._compute_management_client_enums import SelectPermissions
from ._compute_management_client_enums import SharedGalleryHostCaching
from ._compute_management_client_enums import SharedToValues
from ._compute_management_client_enums import SharingProfileGroupTypes
from ._compute_management_client_enums import SharingState
from ._compute_management_client_enums import SharingUpdateOperationTypes
from ._compute_management_client_enums import StorageAccountType
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
# Flat public re-export surface for this models package (auto-generated);
# _patch_all lets hand-written patch modules append extra names below.
__all__ = [
    "ApiError",
    "ApiErrorBase",
    "CommunityGallery",
    "CommunityGalleryImage",
    "CommunityGalleryImageList",
    "CommunityGalleryImageVersion",
    "CommunityGalleryImageVersionList",
    "CommunityGalleryInfo",
    "DataDiskImageEncryption",
    "Disallowed",
    "DiskImageEncryption",
    "EncryptionImages",
    "Gallery",
    "GalleryApplication",
    "GalleryApplicationCustomAction",
    "GalleryApplicationCustomActionParameter",
    "GalleryApplicationList",
    "GalleryApplicationUpdate",
    "GalleryApplicationVersion",
    "GalleryApplicationVersionList",
    "GalleryApplicationVersionPublishingProfile",
    "GalleryApplicationVersionSafetyProfile",
    "GalleryApplicationVersionUpdate",
    "GalleryArtifactPublishingProfileBase",
    "GalleryArtifactSafetyProfileBase",
    "GalleryArtifactSource",
    "GalleryArtifactVersionFullSource",
    "GalleryArtifactVersionSource",
    "GalleryDataDiskImage",
    "GalleryDiskImage",
    "GalleryDiskImageSource",
    "GalleryExtendedLocation",
    "GalleryIdentifier",
    "GalleryImage",
    "GalleryImageFeature",
    "GalleryImageIdentifier",
    "GalleryImageList",
    "GalleryImageUpdate",
    "GalleryImageVersion",
    "GalleryImageVersionList",
    "GalleryImageVersionPublishingProfile",
    "GalleryImageVersionSafetyProfile",
    "GalleryImageVersionStorageProfile",
    "GalleryImageVersionUpdate",
    "GalleryList",
    "GalleryOSDiskImage",
    "GalleryTargetExtendedLocation",
    "GalleryUpdate",
    "ImagePurchasePlan",
    "InnerError",
    "LatestGalleryImageVersion",
    "ManagedArtifact",
    "OSDiskImageEncryption",
    "OSDiskImageSecurityProfile",
    "PirCommunityGalleryResource",
    "PirResource",
    "PirSharedGalleryResource",
    "PolicyViolation",
    "RecommendedMachineConfiguration",
    "RegionalReplicationStatus",
    "RegionalSharingStatus",
    "ReplicationStatus",
    "Resource",
    "ResourceRange",
    "SharedGallery",
    "SharedGalleryDataDiskImage",
    "SharedGalleryDiskImage",
    "SharedGalleryImage",
    "SharedGalleryImageList",
    "SharedGalleryImageVersion",
    "SharedGalleryImageVersionList",
    "SharedGalleryImageVersionStorageProfile",
    "SharedGalleryList",
    "SharedGalleryOSDiskImage",
    "SharingProfile",
    "SharingProfileGroup",
    "SharingStatus",
    "SharingUpdate",
    "SoftDeletePolicy",
    "TargetRegion",
    "UpdateResourceDefinition",
    "UserArtifactManage",
    "UserArtifactSettings",
    "UserArtifactSource",
    "AggregatedReplicationState",
    "Architecture",
    "ConfidentialVMEncryptionType",
    "GalleryApplicationCustomActionParameterType",
    "GalleryExpandParams",
    "GalleryExtendedLocationType",
    "GalleryProvisioningState",
    "GallerySharingPermissionTypes",
    "HostCaching",
    "HyperVGeneration",
    "OperatingSystemStateTypes",
    "OperatingSystemTypes",
    "PolicyViolationCategory",
    "ReplicationMode",
    "ReplicationState",
    "ReplicationStatusTypes",
    "SelectPermissions",
    "SharedGalleryHostCaching",
    "SharedToValues",
    "SharingProfileGroupTypes",
    "SharingState",
    "SharingUpdateOperationTypes",
    "StorageAccountType",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| {
"content_hash": "0deecd6ce5456dcf88f2a38879f30c35",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 89,
"avg_line_length": 40.318385650224215,
"alnum_prop": 0.8064731398064732,
"repo_name": "Azure/azure-sdk-for-python",
"id": "164dff985f0feb390a870faa0f0146cd622f8113",
"size": "9459",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_03/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import textwrap
from contextlib import closing
from xml.etree import ElementTree
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.subsystems.shader import Shader
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.tasks.jvm_compile.analysis_tools import AnalysisTools
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis import ZincAnalysis
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_analysis_parser import ZincAnalysisParser
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.hash_utils import hash_file
from pants.base.workunit import WorkUnitLabel
from pants.java.distribution.distribution import DistributionLocator
from pants.option.custom_types import dict_option
from pants.util.contextutil import open_zip
from pants.util.dirutil import relativize_paths, safe_open
# Well known metadata file required to register scalac plugins with nsc.
_PLUGIN_INFO_FILE = 'scalac-plugin.xml'
class ZincCompile(JvmCompile):
"""Compile Scala and Java code using Zinc."""
_ZINC_MAIN = 'org.pantsbuild.zinc.Main'
_name = 'zinc'
_supports_concurrent_execution = True
@staticmethod
def write_plugin_info(resources_dir, target):
root = os.path.join(resources_dir, target.id)
plugin_info_file = os.path.join(root, _PLUGIN_INFO_FILE)
with safe_open(plugin_info_file, 'w') as f:
f.write(textwrap.dedent("""
<plugin>
<name>{}</name>
<classname>{}</classname>
</plugin>
""".format(target.plugin, target.classname)).strip())
return root, plugin_info_file
@staticmethod
def validate_arguments(log, whitelisted_args, args):
"""Validate that all arguments match whitelisted regexes."""
valid_patterns = {re.compile(p): v for p, v in whitelisted_args.items()}
def validate(arg_index):
arg = args[arg_index]
for pattern, has_argument in valid_patterns.items():
if pattern.match(arg):
return 2 if has_argument else 1
log.warn("Zinc argument '{}' is not supported, and is subject to change/removal!".format(arg))
return 1
arg_index = 0
while arg_index < len(args):
arg_index += validate(arg_index)
  @classmethod
  def subsystem_dependencies(cls):
    # Zinc needs the Scala toolchain plus a JVM distribution locator.
    return super(ZincCompile, cls).subsystem_dependencies() + (ScalaPlatform, DistributionLocator)
  @classmethod
  def get_args_default(cls, bootstrap_option_values):
    # Default zinc args; the -S prefix passes a flag through to scalac.
    return ('-S-encoding', '-SUTF-8', '-S-g:vars')
  @classmethod
  def get_warning_args_default(cls):
    return ('-S-deprecation', '-S-unchecked')
  @classmethod
  def get_no_warning_args_default(cls):
    return ('-S-nowarn',)
  @classmethod
  def register_options(cls, register):
    """Register zinc-specific options and bootstrap the zinc/sbt tool jars."""
    super(ZincCompile, cls).register_options(register)
    register('--plugins', advanced=True, action='append', fingerprint=True,
             help='Use these scalac plugins.')
    register('--plugin-args', advanced=True, type=dict_option, default={}, fingerprint=True,
             help='Map from plugin name to list of arguments for that plugin.')
    # TODO: disable by default because it breaks dependency parsing:
    #   https://github.com/pantsbuild/pants/issues/2224
    # ...also, as of sbt 0.13.9, it is significantly slower for cold builds.
    register('--name-hashing', advanced=True, action='store_true', default=False, fingerprint=True,
             help='Use zinc name hashing.')
    register('--whitelisted-args', advanced=True, type=dict_option,
             default={
               '-S.*': False,
               '-C.*': False,
               '-file-filter': True,
               '-msg-filter': True,
               },
             help='A dict of option regexes that make up pants\' supported API for zinc. '
                  'Options not listed here are subject to change/removal. The value of the dict '
                  'indicates that an option accepts an argument.')
    cls.register_jvm_tool(register,
                          'zinc',
                          classpath=[
                            JarDependency('org.pantsbuild', 'zinc', '1.0.12')
                          ],
                          main=cls._ZINC_MAIN,
                          custom_rules=[
                            # The compiler-interface and sbt-interface tool jars carry xsbt and
                            # xsbti interfaces that are used across the shaded tool jar boundary so
                            # we preserve these root packages wholesale along with the core scala
                            # APIs.
                            Shader.exclude_package('scala', recursive=True),
                            Shader.exclude_package('xsbt', recursive=True),
                            Shader.exclude_package('xsbti', recursive=True),
                          ])
    def sbt_jar(name, **kwargs):
      # Helper pinning all sbt artifacts to the same revision.
      return JarDependency(org='com.typesafe.sbt', name=name, rev='0.13.9', **kwargs)
    cls.register_jvm_tool(register,
                          'compiler-interface',
                          classpath=[
                            sbt_jar(name='compiler-interface',
                                    classifier='sources',
                                    # We just want the single compiler-interface jar and not its
                                    # dep on scala-lang
                                    intransitive=True)
                          ])
    cls.register_jvm_tool(register,
                          'sbt-interface',
                          classpath=[
                            sbt_jar(name='sbt-interface',
                                    # We just want the single sbt-interface jar and not its dep
                                    # on scala-lang
                                    intransitive=True)
                          ])
    # By default we expect no plugin-jars classpath_spec is filled in by the user, so we accept an
    # empty classpath.
    cls.register_jvm_tool(register, 'plugin-jars', classpath=[])
  @classmethod
  def prepare(cls, options, round_manager):
    super(ZincCompile, cls).prepare(options, round_manager)
    # Also make sure the Scala toolchain products are ready before compiling.
    ScalaPlatform.prepare_tools(round_manager)
def select(self, target):
return target.has_sources('.java') or target.has_sources('.scala')
def select_source(self, source_file_path):
return source_file_path.endswith('.java') or source_file_path.endswith('.scala')
  def __init__(self, *args, **kwargs):
    super(ZincCompile, self).__init__(*args, **kwargs)
    # A directory independent of any other classpath which can contain per-target
    # plugin resource files.
    self._plugin_info_dir = os.path.join(self.workdir, 'scalac-plugin-info')
    # Lazily computed by plugin_args(); None until first requested.
    self._lazy_plugin_args = None
    # Validate zinc options
    ZincCompile.validate_arguments(self.context.log, self.get_options().whitelisted_args, self._args)
  def create_analysis_tools(self):
    # Analysis parsing/rewriting helpers for Zinc's analysis files, anchored
    # at the cached JVM distribution's real home.
    return AnalysisTools(DistributionLocator.cached().real_home, ZincAnalysisParser(), ZincAnalysis)
  def zinc_classpath(self):
    """The zinc tool classpath, plus the JDK's tools.jar when available."""
    # Zinc takes advantage of tools.jar if it's presented in classpath.
    # For example com.sun.tools.javac.Main is used for in process java compilation.
    def locate_tools_jar():
      try:
        return DistributionLocator.cached(jdk=True).find_libs(['tools.jar'])
      except DistributionLocator.Error:
        # Best-effort: zinc still works without a JDK, just more slowly.
        self.context.log.info('Failed to locate tools.jar. '
                              'Install a JDK to increase performance of Zinc.')
        return []
    return self.tool_classpath('zinc') + locate_tools_jar()
  def compiler_classpath(self):
    # Scala compiler jars as resolved by the ScalaPlatform subsystem.
    return ScalaPlatform.global_instance().compiler_classpath(self.context.products)
  def extra_compile_time_classpath_elements(self):
    # Classpath entries necessary for our compiler plugins.
    return self.plugin_jars()
def plugin_jars(self):
"""The classpath entries for jars containing code for enabled plugins."""
if self.get_options().plugins:
return self.tool_classpath('plugin-jars')
else:
return []
  def plugin_args(self):
    # Compute the scalac plugin args once and cache for subsequent calls.
    if self._lazy_plugin_args is None:
      self._lazy_plugin_args = self._create_plugin_args()
    return self._lazy_plugin_args
def _create_plugin_args(self):
if not self.get_options().plugins:
return []
plugin_args = self.get_options().plugin_args
active_plugins = self._find_plugins()
ret = []
for name, jar in active_plugins.items():
ret.append('-S-Xplugin:{}'.format(jar))
for arg in plugin_args.get(name, []):
ret.append('-S-P:{}:{}'.format(name, arg))
return ret
def _find_plugins(self):
    """Returns a map from plugin name to plugin jar.

    Raises TaskError if a requested plugin is defined in more than one jar,
    has a malformed descriptor, or cannot be found at all.
    """
    # Allow multiple flags and also comma-separated values in a single flag.
    plugin_names = set([p for val in self.get_options().plugins for p in val.split(',')])
    plugins = {}
    buildroot = get_buildroot()
    for jar in self.plugin_jars():
        with open_zip(jar, 'r') as jarfile:
            try:
                with closing(jarfile.open(_PLUGIN_INFO_FILE, 'r')) as plugin_info_file:
                    plugin_info = ElementTree.parse(plugin_info_file).getroot()
                    if plugin_info.tag != 'plugin':
                        raise TaskError(
                            'File {} in {} is not a valid scalac plugin descriptor'.format(_PLUGIN_INFO_FILE,
                                                                                           jar))
                    name = plugin_info.find('name').text
                    if name in plugin_names:
                        if name in plugins:
                            raise TaskError('Plugin {} defined in {} and in {}'.format(name, plugins[name], jar))
                        # It's important to use relative paths, as the compiler flags get embedded in the zinc
                        # analysis file, and we port those between systems via the artifact cache.
                        plugins[name] = os.path.relpath(jar, buildroot)
            except KeyError:
                # Jars with no plugin descriptor entry simply aren't plugins; skip them.
                pass
    unresolved_plugins = plugin_names - set(plugins.keys())
    if unresolved_plugins:
        raise TaskError('Could not find requested plugins: {}'.format(list(unresolved_plugins)))
    return plugins
def extra_products(self, target):
    """Override extra_products to produce a plugin information file."""
    ret = []
    # Only scalac-plugin targets that declare a classname get a descriptor emitted.
    if target.is_scalac_plugin and target.classname:
        # NB: We don't yet support explicit in-line compilation of scala compiler plugins from
        # the workspace to be used in subsequent compile rounds like we do for annotation processors
        # with javac. This would require another GroupTask similar to AptCompile, but for scala.
        root, plugin_info_file = self.write_plugin_info(self._plugin_info_dir, target)
        ret.append((root, [plugin_info_file]))
    return ret
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
            log_file, settings):
    """Build the zinc command line and invoke it; raises TaskError on failure."""
    # We add compiler_classpath to ensure the scala-library jar is on the classpath.
    # TODO: This also adds the compiler jar to the classpath, which compiled code shouldn't
    # usually need. Be more selective?
    # TODO(John Sirois): Do we need to do this at all? If adding scala-library to the classpath is
    # only intended to allow target authors to omit a scala-library dependency, then ScalaLibrary
    # already overrides traversable_dependency_specs to achieve the same end; arguably at a more
    # appropriate level and certainly at a more appropriate granularity.
    relativized_classpath = relativize_paths(self.compiler_classpath() + classpath, get_buildroot())
    zinc_args = []
    zinc_args.extend([
        '-log-level', self.get_options().level,
        '-analysis-cache', analysis_file,
        '-classpath', ':'.join(relativized_classpath),
        '-d', classes_output_dir
    ])
    if not self.get_options().colors:
        zinc_args.append('-no-color')
    if not self.get_options().name_hashing:
        zinc_args.append('-no-name-hashing')
    if log_file:
        zinc_args.extend(['-capture-log', log_file])
    zinc_args.extend(['-compiler-interface', self.tool_jar('compiler-interface')])
    zinc_args.extend(['-sbt-interface', self.tool_jar('sbt-interface')])
    zinc_args.extend(['-scala-path', ':'.join(self.compiler_classpath())])
    zinc_args += self.plugin_args()
    if upstream_analysis:
        # Map upstream classes dirs to their analysis files so zinc can reuse them.
        zinc_args.extend(['-analysis-map',
                          ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])
    zinc_args += args
    # Forward javac-level source/target settings through zinc's -C passthrough.
    zinc_args.extend([
        '-C-source', '-C{}'.format(settings.source_level),
        '-C-target', '-C{}'.format(settings.target_level),
    ])
    zinc_args.extend(settings.args)
    jvm_options = list(self._jvm_options)
    zinc_args.extend(sources)
    self.log_zinc_file(analysis_file)
    # A truthy return from runjava indicates a non-zero exit, i.e. failure.
    if self.runjava(classpath=self.zinc_classpath(),
                    main=self._ZINC_MAIN,
                    jvm_options=jvm_options,
                    args=zinc_args,
                    workunit_name='zinc',
                    workunit_labels=[WorkUnitLabel.COMPILER]):
        raise TaskError('Zinc compile failed.')
def log_zinc_file(self, analysis_file):
    """Debug-log the analysis file zinc will use, with its hash when it exists."""
    if os.path.exists(analysis_file):
        digest = hash_file(analysis_file).upper()
    else:
        digest = 'nonexistent'
    self.context.log.debug('Calling zinc on: {} ({})'.format(analysis_file, digest))
| {
"content_hash": "131ec1785b79bd2236b71d5a571c470d",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 101,
"avg_line_length": 42.493788819875775,
"alnum_prop": 0.6263977197982898,
"repo_name": "sameerparekh/pants",
"id": "64eb4a04a0d24cc32603f6b2a2e62fc15f514904",
"size": "13830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/tasks/jvm_compile/zinc/zinc_compile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11442"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "70150"
},
{
"name": "Java",
"bytes": "308102"
},
{
"name": "JavaScript",
"bytes": "25075"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "3862954"
},
{
"name": "Scala",
"bytes": "85437"
},
{
"name": "Shell",
"bytes": "49265"
},
{
"name": "Thrift",
"bytes": "2858"
}
],
"symlink_target": ""
} |
from .test_cases import * # noqa
| {
"content_hash": "296b68269c9cd27fe1ceb5662f3befaf",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 33,
"avg_line_length": 34,
"alnum_prop": 0.6764705882352942,
"repo_name": "mikeurbanski/django-inlines",
"id": "c644cdf9055597803c584cead9c4f8ac89569af8",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74986"
}
],
"symlink_target": ""
} |
def arithmetic_series(start, inc):
    """Yield start, start + inc, start + 2*inc, ... while each term is < 100."""
    while start < 100:
        yield start
        start += inc


# A generator is lazy: printing it shows the generator object, not its values.
series = arithmetic_series(5, 10)
print(series)

# Deliberately unreadable identifier -- kept only as a demonstration.
nlkmklklkngjhbbjhjhbkhbhbkbnjknnjlnknlnkjbjhbhbbbbkbgbcfvcvvnbvvmnvmbbnbnbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = 9

# '1' == 1 compares a str with an int and is always False, so "fail" prints.
if '1' == 1:
    # Bug fix: the original print("She said "Yes"") was a SyntaxError -- the
    # inner double quotes terminated the string literal. Use single quotes.
    print('She said "Yes"')
else:
    print("fail")

# arithmetic_series(5, 10) yields exactly ten values: 5, 15, ..., 95.
print(next(series))
print(next(series))
print(next(series))
print(next(series))
print(next(series))
print(next(series))
print(next(series))
print(next(series))
print(next(series))
print(next(series))
# An eleventh next() would raise StopIteration -- the series is exhausted.
#print(next(series))
"content_hash": "828f77d86b9cc783ae5bbc20fc745b3d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 131,
"avg_line_length": 23.416666666666668,
"alnum_prop": 0.7455516014234875,
"repo_name": "balajithangamani/LearnPy",
"id": "30c1a0a866f0ea86045cc283edd6aadf49574519",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LearnPython3/generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3528"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from contextlib import contextmanager
import os,sys
PSSE_LOCATION = r"C:\Program Files (x86)\PTI\PSSE32\PSSBIN"
sys.path.append(PSSE_LOCATION)
os.environ['PATH'] = os.environ['PATH'] + ';' + PSSE_LOCATION
import psspy # importing python
from psspy import _i,_f # importing the default integer and float values used by PSS\E(every API uses them)
import redirect
redirect.psse2py() # redirecting PSS\E output to python)
import numpy
import scipy
from scipy import special,optimize
import StringIO
@contextmanager
def silence(file_object=None):
    """
    Discard stdout (i.e. write to null device) or
    optionally write to given file-like object.

    Bug fix: when no file_object is supplied, the os.devnull handle we open
    here is now closed on exit instead of being leaked.
    """
    opened_here = file_object is None
    if opened_here:
        file_object = open(os.devnull, 'w')
    old_stdout = sys.stdout
    try:
        sys.stdout = file_object
        yield
    finally:
        sys.stdout = old_stdout
        if opened_here:
            file_object.close()
# Capture PSS\E's initialization chatter instead of spamming the console.
output = StringIO.StringIO()
with silence(output):
    psspy.psseinit(80000) # initialize PSS\E in python
savecase = 'IEEE 57 bus.sav'
psspy.case(savecase)
# find all the buses
psspy.bsys(0,0,[0.0,0.0],1,[1],0,[],0,[],0,[])
ierr,all_bus = psspy.abusint(0,1,['number'])
# psspy array APIs return a list of column lists; [0] is the first column.
bus_num = all_bus[0]
#List of all machines
psspy.bsys(sid = 1,numbus = len(bus_num), buses = bus_num)
ierr,machine_bus = psspy.amachint(1,1,['NUMBER'])
machine_bus = machine_bus[0]
ierr,machine_id = psspy.amachchar(1,1,['ID'])
machine_id = machine_id[0]
#List of all Gen
psspy.bsys(sid = 1,numbus = len(bus_num), buses = bus_num)
ierr,gen_bus = psspy.agenbusint(1,1,['NUMBER'])
gen_bus = gen_bus[0]
#List of all load
psspy.bsys(sid = 1,numbus = len(bus_num), buses = bus_num)
ierr,load_bus = psspy.alodbusint(1,1,['NUMBER'])
load_bus = load_bus[0]
ierr,load_id = psspy.aloadchar(1,1,['ID'])
load_id = load_id[0]
#List of branches
ierr,internal_linesfbtb = psspy.abrnint(sid=1,ties=1,flag=1,string=['FROMNUMBER','TONUMBER'])
ierr,internal_linesid = psspy.abrnchar(sid=1,ties=1,flag=1,string=['ID'])
#Building the list of contingencies
line_trip = internal_linesfbtb + internal_linesid # [[fb1,tb1,id1]]
response_buses = list(bus_num)
# export the pq bus
# NOTE(review): per the variable names below, bus type 1 appears to mean a
# PQ (load) bus and type 3 the swing/slack bus -- confirm against PSS\E docs.
ierr, bus_type = psspy.abusint(1,1,'type')
bus_type = bus_type[0]
pq = []
for index,bus in enumerate(bus_num):
    if bus_type[index] == 1:
        pq.append(bus)
# export the slack bus
slackBus = []
for index,bus in enumerate(bus_num):
    if bus_type[index] == 3:
        slackBus.append(bus)
| {
"content_hash": "c2e7e16a4a5697b5bcce143a2950c87f",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 108,
"avg_line_length": 29.426966292134832,
"alnum_prop": 0.637266132111493,
"repo_name": "GridProtectionAlliance/openECA",
"id": "2e9bd25ade84d05169bde643d5e1f2381688106a",
"size": "2803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/Analytics/TransmissionLineImpedanceCalculator/Source/Step_1_VI Acquisition/studydata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12019"
},
{
"name": "C#",
"bytes": "1787040"
},
{
"name": "CSS",
"bytes": "8791"
},
{
"name": "F#",
"bytes": "1591"
},
{
"name": "HTML",
"bytes": "174148"
},
{
"name": "JavaScript",
"bytes": "220272"
},
{
"name": "MATLAB",
"bytes": "64296"
},
{
"name": "PLSQL",
"bytes": "114326"
},
{
"name": "PowerShell",
"bytes": "1071"
},
{
"name": "Python",
"bytes": "131705"
},
{
"name": "Rich Text Format",
"bytes": "241974"
},
{
"name": "TSQL",
"bytes": "249839"
},
{
"name": "Visual Basic .NET",
"bytes": "1151"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration altering the default of ThresholdValue.time.
    # NOTE(review): the default is a datetime frozen at generation time
    # (2014-11-15 23:46:40 UTC), not "now" -- typical of auto-generated defaults.

    dependencies = [
        ('threshold_value', '0005_auto_20141114_1204'),
    ]

    operations = [
        migrations.AlterField(
            model_name='thresholdvalue',
            name='time',
            field=models.DateTimeField(default=datetime.datetime(2014, 11, 15, 23, 46, 40, 415000, tzinfo=utc)),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "a48be74ec82a0113ec4fbf171e9196be",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 112,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.6372007366482505,
"repo_name": "sigurdsa/angelika-api",
"id": "cdf29a3ce57d68ea4c2488ba06288131fc3faa13",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threshold_value/migrations/0006_auto_20141116_0046.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "324"
},
{
"name": "Python",
"bytes": "159273"
}
],
"symlink_target": ""
} |
from __future__ import division
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from operator import itemgetter
import os
import pickle
import json
# Main database: pre-processed SQLite tables, read by find_patterns() below.
db = create_engine('sqlite:///data/processed.db')
def rank_causes(tb, var, pats, N = 3, top = 5):
    """Rank the suffix patterns in other columns that co-occur with *pats* in *var*.

    For every row where column ``var`` contains one of the ``pats`` substrings,
    the other columns are truncated at the match position, and the frequencies
    of their trailing k-character suffixes (k = N..2) are counted.

    :param tb: DataFrame of strings (one column per variable).
    :param var: column whose pattern occurrences anchor the search.
    :param pats: iterable of substrings to look for in ``var``.
    :param N: longest suffix length to count.
    :param top: number of (column, suffix, count) cases to return.
    :returns: list of (column, suffix, count), most frequent first.

    Bug fixes: the long-removed ``DataFrame.ix`` indexer is replaced with
    ``.loc`` (boolean-mask and label row access are its direct equivalents),
    and empty strings no longer crash the constant-string check.
    """
    def _is_constant(s):
        # A string of one repeated character (or an empty string) carries no
        # signal; s[:1] avoids the IndexError that s[0] raised for ''.
        return s[:1] * len(s) == s
    # work on a local copy
    ltb = tb.copy()
    # remove rows where var is NaN
    ltb = ltb.loc[ltb[var].notnull()]
    # find matches: cuts[i] = one past the earliest pattern match in row i
    cuts = {}
    for i in ltb.index:
        # find the first match for all patterns
        idx = [ltb[var][i].find(p) for p in pats]
        # keep only the successful matches
        idx = [j for j in idx if j >= 0]
        if idx:
            cuts[i] = min(idx) + 1
        else:
            ltb.drop([i], inplace = True)
    # don't recount column var
    ltb.drop([var], axis = 1, inplace = True)
    # remove things after the pattern
    for (i, cut) in cuts.items():
        ltb.loc[i] = ltb.loc[i].apply(lambda s: s[:cut]
                                      if pd.notnull(s)
                                      else None)
    # count all the co-occurrence frequencies of trailing k-grams
    cases = []
    for k in range(N, 1, -1):
        ptb = ltb.copy()
        # keep only the last k characters, and only for rows long enough
        for (i, cut) in cuts.items():
            ptb.loc[i] = ptb.loc[i].apply(lambda s: s[-k:]
                                          if (pd.notnull(s)
                                              and (k <= cut))
                                          else None)
        # count frequencies per remaining column
        for c in ptb:
            dic = ptb[c].value_counts().to_dict()
            cases = cases + [(c, key, value)
                             for (key, value) in dic.items()
                             if not _is_constant(key)]
    # keep the `top` most frequent cases, most frequent first
    cases = sorted(cases, key = itemgetter(2))[-top:][::-1]
    return cases
def build_belief_tree(tb, var, pat, N = 3, top = 5, depth = 1):
    """Grow a tree of likely "causes" rooted at (var, pat), `depth` layers deep.

    Each leaf is expanded with the top-ranked co-occurring patterns returned
    by rank_causes.

    Bug fix: the N and top parameters were accepted but never forwarded to
    rank_causes, which silently used its own defaults; they are now passed
    through.
    """
    def one_more_layer(forest):
        # Descend to the current leaves and expand each of them once.
        for tree in forest:
            if tree['children']:
                one_more_layer(tree['children'])
            else:
                data = tree['name']
                # NOTE(review): at depth >= 2, data['pat'] is a single string;
                # rank_causes iterates its pats argument, so a bare string is
                # scanned character by character -- confirm this is intended.
                causes = rank_causes(tb, data['var'], data['pat'], N, top)
                tree['children'] = []
                for (cause_var, cause_pat, prob) in causes:
                    tree['children'].append({'name': {'var': cause_var,
                                                      'pat': cause_pat,
                                                      'prob': prob},
                                             'children': []})
    root = {'name': {'var': var, 'pat': pat, 'prob': 1},
            'children': []}
    for _ in range(depth):
        one_more_layer([root])
    return root
def to_output(tree):
    """Flatten each node's {'var','pat','prob'} dict into a display string, in place.

    Leaf nodes additionally lose their empty 'children' entry.
    """
    info = tree['name']
    tree['name'] = "{} {} ({})".format(info['var'],
                                       info['pat'],
                                       info['prob'])
    children = tree['children']
    if children:
        for child in children:
            to_output(child)
    else:
        tree.pop('children')
def find_patterns(dataset):
    """Return the belief tree for *dataset*, computing and caching it on disk.

    The tree is cached as a pickle under data/<dataset>_pat.pkl.

    Bug fix: pickle files must be opened in binary mode ('rb'/'wb'); the
    original text-mode 'r'/'w' only worked under Python 2 and corrupts or
    fails to read pickles under Python 3.
    """
    # find cached result
    fn = 'data/' + dataset + '_pat.pkl'
    if os.path.isfile(fn):
        with open(fn, 'rb') as f:
            return pickle.load(f)
    # read database
    tb = pd.read_sql_table(dataset, db, index_col = 'ID')
    # clean up: drop duplicate rows and all-NaN columns
    tb.drop_duplicates(inplace = True)
    tb.drop(tb.columns[tb.isnull().all()], axis = 1,
            inplace = True)
    # build tree rooted at the last column
    final_var = tb.columns[-1]
    t = build_belief_tree(tb, final_var, ['+0', '+-'],
                          N = 4, depth = 3)
    to_output(t)
    t['name'] = final_var + ' +0/+-'
    # cache
    with open(fn, 'wb') as f:
        pickle.dump(t, f)
    return t
def plot_bayesian_network(dataset):
    """Read data/<dataset>.sif and return (nodes, edges) as Cytoscape JSON strings.

    Each .sif row is "source <TAB> interaction <TAB> target"; '+' interactions
    become 'triangle' arrows, everything else 'tee'.
    """
    sif_path = 'data/' + dataset + '.sif'
    with open(sif_path, 'r') as sif_file:
        rows = sif_file.readlines()
    # parse every edge triple from the file
    edge_triples = [tuple(row.strip().split('\t')) for row in rows]
    # collect the distinct endpoint names
    node_names = set()
    for (src, _, dst) in edge_triples:
        node_names = node_names.union([src, dst])
    node_names = list(node_names)
    # convert into cytoscape format
    cy_nodes = [{'data': {'id': name, 'name': name}} for name in node_names]
    cy_edges = [{'data': {'source': source,
                          'target': target,
                          'interaction': 'triangle'
                          if interaction == '+' else 'tee'}}
                for (source, interaction, target) in edge_triples]
    return (json.dumps(cy_nodes), json.dumps(cy_edges))
| {
"content_hash": "e177d2eb11647dc5c2f9ab7c6e6bb639",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 68,
"avg_line_length": 35.67910447761194,
"alnum_prop": 0.4666387784982221,
"repo_name": "lzlarryli/limelight",
"id": "571359d783cf5a084d73a9dd19ddd7e4dd09418c",
"size": "4781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/pattern_discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120515"
},
{
"name": "HTML",
"bytes": "35000"
},
{
"name": "JavaScript",
"bytes": "767682"
},
{
"name": "Python",
"bytes": "24870"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
} |
"""API for the glance service."""
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
# Query parameters consumed by the REST layer itself (pagination/sorting);
# everything else in a request is forwarded to glance as a filter.
CLIENT_KEYWORDS = {'resource_type', 'marker',
                   'sort_dir', 'sort_key', 'paginate'}
@urls.register
class Version(generic.View):
    """API for active glance version."""
    url_regex = r'glance/version/$'

    @rest_utils.ajax()
    def get(self, request):
        """Return the active glance version as {'version': <str>}."""
        active_version = api.glance.get_version()
        return {'version': str(active_version)}
@urls.register
class Image(generic.View):
    """API for retrieving a single image"""
    url_regex = r'glance/images/(?P<image_id>[^/]+|default)/$'

    @rest_utils.ajax()
    def get(self, request, image_id):
        """Get a specific image

        http://localhost/api/glance/images/cc758c90-3d98-4ea1-af44-aab405c9c915
        """
        image = api.glance.image_get(request, image_id)
        # show_ext_attrs exposes extended attributes in the serialized result.
        return image.to_dict(show_ext_attrs=True)

    @rest_utils.ajax(data_required=True)
    def patch(self, request, image_id):
        """Update a specific image

        Update an Image using the parameters supplied in the POST
        application/json object. The parameters are:

        :param name: (required) the name to give the image
        :param description: (optional) description of the image
        :param disk_format: (required) format of the image
        :param kernel: (optional) kernel to use for the image
        :param ramdisk: (optional) Ramdisk to use for the image
        :param architecture: (optional) the Architecture of the image
        :param min_disk: (optional) the minimum disk size
            for the image to boot with
        :param min_ram: (optional) the minimum ram for the image to boot with
        :param visibility: (required) takes 'public', 'shared', 'private' and
            'community'
        :param protected: (required) true if the image is protected

        Any parameters not listed above will be assigned as custom properties
        for the image.

        http://localhost/api/glance/images/cc758c90-3d98-4ea1-af44-aab405c9c915
        """
        meta = _create_image_metadata(request.DATA)
        api.glance.image_update(request, image_id, **meta)

    @rest_utils.ajax()
    def delete(self, request, image_id):
        """Delete a specific image

        DELETE http://localhost/api/glance/images/<image_id>
        """
        api.glance.image_delete(request, image_id)
@urls.register
class ImageProperties(generic.View):
    """API for retrieving only a custom properties of single image."""
    url_regex = r'glance/images/(?P<image_id>[^/]+)/properties/'

    @rest_utils.ajax()
    def get(self, request, image_id):
        """Get custom properties of specific image."""
        image = api.glance.image_get(request, image_id)
        return image.properties

    @rest_utils.ajax(data_required=True)
    def patch(self, request, image_id):
        """Update custom properties of specific image.

        This method returns HTTP 204 (no content) on success.
        """
        updated = request.DATA['updated']
        # The client sends os_hidden as the string "true"/"false"; convert it
        # to a real boolean before handing it to glance.
        if "os_hidden" in updated:
            updated['os_hidden'] = (
                updated['os_hidden'] == "true")
        api.glance.image_update_properties(
            request, image_id, request.DATA.get('removed'),
            **updated
        )
class UploadObjectForm(forms.Form):
    """Form used by Images.post to pull the raw file payload out of the request."""
    # Optional: image creation may supply its data via URL instead of upload.
    data = forms.FileField(required=False)
@urls.register
class Images(generic.View):
    """API for Glance images."""
    url_regex = r'glance/images/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of images.

        The listing result is an object with property "items". Each item is
        an image.

        Example GET:
        http://localhost/api/glance/images?sort_dir=desc&sort_key=name&name=cirros-0.3.2-x86_64-uec

        The following get parameters may be passed in the GET
        request:

        :param paginate: If true will perform pagination based on settings.
        :param marker: Specifies the namespace of the last-seen image.
            The typical pattern of limit and marker is to make an
            initial limited request and then to use the last
            namespace from the response as the marker parameter
            in a subsequent limited request. With paginate, limit
            is automatically set.
        :param sort_dir: The sort direction ('asc' or 'desc').
        :param sort_key: The field to sort on (for example, 'created_at').
            Default is created_at.

        Any additional request parameters will be passed through the API as
        filters. There are v1/v2 complications which are being addressed as a
        separate work stream: https://review.opendev.org/#/c/150084/
        """
        # Split out the client-side keywords; the rest become glance filters.
        filters, kwargs = rest_utils.parse_filters_kwargs(request,
                                                          CLIENT_KEYWORDS)
        images, has_more_data, has_prev_data = api.glance.image_list_detailed(
            request, filters=filters, **kwargs)
        return {
            'items': [i.to_dict() for i in images],
            'has_more_data': has_more_data,
            'has_prev_data': has_prev_data,
        }

    # note: not an AJAX request - the body will be raw file content mixed with
    # metadata
    @rest_utils.post2data
    @csrf_exempt
    def post(self, request):
        """Create an image from an uploaded file plus form metadata."""
        form = UploadObjectForm(request.DATA, request.FILES)
        if not form.is_valid():
            raise rest_utils.AjaxError(500, 'Invalid request')
        data = form.clean()
        meta = _create_image_metadata(request.DATA)
        meta['data'] = data['data']
        image = api.glance.image_create(request, **meta)
        return rest_utils.CreatedResponse(
            '/api/glance/images/%s' % image.name,
            image.to_dict()
        )

    @rest_utils.ajax(data_required=True)
    def put(self, request):
        """Create an Image.

        Create an Image using the parameters supplied in the POST
        application/json object. The parameters are:

        :param name: the name to give the image
        :param description: (optional) description of the image
        :param source_type: (required) source type.
            current only 'url' is supported
        :param image_url: (required) URL to get the image
        :param disk_format: (required) format of the image
        :param kernel: (optional) kernel to use for the image
        :param ramdisk: (optional) Ramdisk to use for the image
        :param architecture: (optional) the Architecture of the image
        :param min_disk: (optional) the minimum disk size
            for the image to boot with
        :param min_ram: (optional) the minimum ram for the image to boot with
        :param visibility: (required) takes 'public', 'private', 'shared', and
            'community'
        :param protected: (required) true if the image is protected
        :param import_data: (optional) true to copy the image data
            to the image service or use it from the current location

        Any parameters not listed above will be assigned as custom properties
        for the image.

        This returns the new image object on success.
        """
        meta = _create_image_metadata(request.DATA)
        if request.DATA.get('image_url'):
            # import_data decides between copying the remote image into the
            # image service and referencing it in place.
            if request.DATA.get('import_data'):
                meta['copy_from'] = request.DATA.get('image_url')
            else:
                meta['location'] = request.DATA.get('image_url')
        else:
            meta['data'] = request.DATA.get('data')
        image = api.glance.image_create(request, **meta)
        return rest_utils.CreatedResponse(
            '/api/glance/images/%s' % image.name,
            image.to_dict()
        )
@urls.register
class MetadefsNamespaces(generic.View):
    """API for Single Glance Metadata Definitions.

    https://docs.openstack.org/glance/latest/user/metadefs-concepts.html
    """
    url_regex = r'glance/metadefs/namespaces/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get a list of metadata definition namespaces.

        The listing result is an object with property "items". Each item is
        a namespace.

        Example GET:
        http://localhost/api/glance/metadefs/namespaces?resource_types=OS::Nova::Flavor&sort_dir=desc&marker=OS::Compute::Watchdog&paginate=False&sort_key=namespace

        The following get parameters may be passed in the GET
        request:

        :param resource_type: Namespace resource type.
            If specified returned namespace properties will have prefixes
            proper for selected resource type.
        :param paginate: If true will perform pagination based on settings.
        :param marker: Specifies the namespace of the last-seen namespace.
            The typical pattern of limit and marker is to make an
            initial limited request and then to use the last
            namespace from the response as the marker parameter
            in a subsequent limited request. With paginate, limit
            is automatically set.
        :param sort_dir: The sort direction ('asc' or 'desc').
        :param sort_key: The field to sort on (for example, 'created_at').
            Default is namespace. The way base namespaces are loaded into
            glance typically at first deployment is done in a single
            transaction giving them a potentially unpredictable sort result
            when using create_at.

        Any additional request parameters will be passed through the API as
        filters.
        """
        filters, kwargs = rest_utils.parse_filters_kwargs(
            request, CLIENT_KEYWORDS
        )
        # metadefs_namespace_full_list returns a 3-tuple matching `names`.
        names = ('items', 'has_more_data', 'has_prev_data')
        return dict(zip(names, api.glance.metadefs_namespace_full_list(
            request, filters=filters, **kwargs
        )))
@urls.register
class MetadefsResourceTypesList(generic.View):
    """API for getting Metadata Definitions Resource Types List.

    https://docs.openstack.org/glance/latest/user/metadefs-concepts.html
    """
    url_regex = r'glance/metadefs/resourcetypes/$'

    @rest_utils.ajax()
    def get(self, request):
        """Get Metadata definitions resource types list.

        The listing result is an object with property "items". Each item is
        a resource type.

        Example GET:
        http://localhost/api/glance/resourcetypes/

        Any request parameters will be passed through the API as filters.
        """
        resource_types = api.glance.metadefs_resource_types_list(request)
        return {'items': resource_types}
def _create_image_metadata(data):
    """Translate request data into glance metadata, mapping KeyError to HTTP 400."""
    # In Angular implementation we use 'visibility' field only and
    # 'is_public' field is not used when creating/updating metadata.
    # However, the previous 'is_public' value is sent in a request.
    # We drop it here before passing it to create_image_metadata.
    data.pop('is_public', None)
    try:
        return api.glance.create_image_metadata(data)
    except KeyError as e:
        raise rest_utils.AjaxError(400, e.args[0])
| {
"content_hash": "a4ab688c76c9251635488240c4c173c7",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 164,
"avg_line_length": 36.89250814332247,
"alnum_prop": 0.6356171640473247,
"repo_name": "openstack/horizon",
"id": "f27bda19ac32e57115594610e80b37a93444f119",
"size": "11931",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/rest/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "583449"
},
{
"name": "JavaScript",
"bytes": "2585531"
},
{
"name": "Python",
"bytes": "5370605"
},
{
"name": "SCSS",
"bytes": "133237"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
} |
import math
import re
import tornado.gen
from thumbor.ext.filters import _alpha
from thumbor.filters import BaseFilter, filter_method
from thumbor.loaders import LoaderResult
from thumbor.utils import logger
class Filter(BaseFilter):
    """Watermark filter: pastes a (possibly resized, alpha-blended) watermark
    onto the image at a fixed, centered, or tiled ("repeat") position."""

    @staticmethod
    def detect_and_get_ratio_position(pos, length):
        """Translate a percentage position like '30p'/'-30p' into absolute
        pixels of *length*; any other string is returned unchanged."""
        match = re.match("^(-?)([0-9]+)p$", pos)
        if not match:
            return pos
        sign, ratio = match.groups()
        # the + 1e-5 nudge presumably avoids round() landing just below a
        # .5 boundary due to float error -- TODO confirm
        pos = "{sign}{pos}".format(
            sign=sign, pos=round(length * float(ratio) / 100 + 1e-5)
        )
        return pos

    @staticmethod
    def calc_watermark_size(image_size, watermark_sz, w_ratio, h_ratio):
        """Scale watermark_sz to fit within the w_ratio/h_ratio fractions of
        image_size, preserving aspect ratio; returns (width, height).

        When only one ratio is given, the other bound is derived from the
        watermark's own aspect ratio.
        """
        wm_max_width = image_size[0] * w_ratio if w_ratio else None
        wm_max_height = image_size[1] * h_ratio if h_ratio else None
        if not wm_max_width:
            wm_max_width = watermark_sz[0] * wm_max_height / watermark_sz[1]
        if not wm_max_height:
            wm_max_height = watermark_sz[1] * wm_max_width / watermark_sz[0]
        # pick the tighter constraint so the result fits inside both bounds
        if watermark_sz[0] / wm_max_width >= watermark_sz[1] / wm_max_height:
            wm_height = round(watermark_sz[1] * wm_max_width / watermark_sz[0])
            wm_width = round(wm_max_width)
        else:
            wm_height = round(wm_max_height)
            wm_width = round(watermark_sz[0] * wm_max_height / watermark_sz[1])
        return (wm_width, wm_height)

    # TODO: refactor this
    def on_image_ready(  # pylint: disable=too-many-statements,too-many-branches,too-many-locals
        self, buffer
    ):
        """Blend the loaded watermark into the target image.

        self.x / self.y may each be a pixel offset (a leading '-' counts from
        the right/bottom edge), a percentage ('30p'), 'center', or 'repeat'
        (tile along that axis).
        """
        self.watermark_engine.load(buffer, None)
        self.watermark_engine.enable_alpha()
        # apply the requested transparency to the watermark's pixel data
        mode, data = self.watermark_engine.image_data_as_rgb()
        imgdata = _alpha.apply(mode, self.alpha, data)
        self.watermark_engine.set_image_data(imgdata)
        image_size = self.engine.size
        watermark_sz = self.watermark_engine.size
        if self.w_ratio or self.h_ratio:
            # resize the watermark relative to the target image dimensions
            watermark_sz = self.calc_watermark_size(
                image_size, watermark_sz, self.w_ratio, self.h_ratio
            )
            self.watermark_engine.resize(watermark_sz[0], watermark_sz[1])
        # convert percentage positions ('30p') into absolute pixel strings
        self.x = self.detect_and_get_ratio_position(self.x, image_size[0])
        self.y = self.detect_and_get_ratio_position(self.y, image_size[1])
        mos_x = self.x == "repeat"  # tile horizontally
        mos_y = self.y == "repeat"  # tile vertically
        center_x = self.x == "center"
        center_y = self.y == "center"
        if not center_x and not mos_x:
            # a leading '-' means the offset counts from the opposite edge
            inv_x = self.x[0] == "-"
            x = int(self.x)
        if not center_y and not mos_y:
            inv_y = self.y[0] == "-"
            y = int(self.y)
        if not mos_x:
            repeat_x = (1, 0)
            if center_x:
                x = int((image_size[0] - watermark_sz[0]) / 2)
            elif inv_x:
                x = int((image_size[0] - watermark_sz[0]) + x)
        else:
            # (count, leftover px) of whole watermark widths across the image
            repeat_x = divmod(image_size[0], watermark_sz[0])
            if image_size[0] * 1.0 / watermark_sz[0] < 2:
                repeat_x = (math.ceil(image_size[0] * 1.0 / watermark_sz[0]), 10)
            space_x = 10
        if not mos_y:
            repeat_y = (1, 0)
            if center_y:
                y = int((image_size[1] - watermark_sz[1]) / 2)
            elif inv_y:
                y = int((image_size[1] - watermark_sz[1]) + y)
        else:
            repeat_y = divmod(image_size[1], watermark_sz[1])
            if image_size[1] * 1.0 / watermark_sz[1] < 2:
                repeat_y = (math.ceil(image_size[1] * 1.0 / watermark_sz[1]), 10)
            space_y = 10
        if not mos_x and not mos_y:
            # single paste at the computed (x, y)
            self.engine.paste(self.watermark_engine, (x, y), merge=True)
        elif mos_x and mos_y:
            # tile both directions; cap the grid at 6x6 when it would exceed
            # 100 pastes
            if (repeat_x[0] * repeat_y[0]) > 100:
                tmpRepeatX = min(6, repeat_x[0])
                tmpRepeatY = min(6, repeat_y[0])
                repeat_x = (tmpRepeatX, image_size[0] - tmpRepeatX * watermark_sz[0])
                repeat_y = (tmpRepeatY, image_size[1] - tmpRepeatY * watermark_sz[1])
            # distribute the leftover pixels evenly between copies
            space_x = repeat_x[1] // (max(repeat_x[0], 2) - 1)
            space_y = repeat_y[1] // (max(repeat_y[0], 2) - 1)
            for i in range(int(repeat_x[0])):
                x = i * space_x + i * watermark_sz[0]
                for j in range(int(repeat_y[0])):
                    y = j * space_y + j * watermark_sz[1]
                    self.engine.paste(self.watermark_engine, (x, y), merge=True)
        elif mos_x:
            space_x = repeat_x[1] // (max(repeat_x[0], 2) - 1)
            for i in range(int(repeat_x[0])):
                x = i * space_x + i * watermark_sz[0]
                self.engine.paste(self.watermark_engine, (x, y), merge=True)
        else:
            space_y = repeat_y[1] // (max(repeat_y[0], 2) - 1)
            for j in range(int(repeat_y[0])):
                y = j * space_y + j * watermark_sz[1]
                self.engine.paste(self.watermark_engine, (x, y), merge=True)

    @filter_method(
        BaseFilter.String,
        r"(?:-?\d+p?)|center|repeat",
        r"(?:-?\d+p?)|center|repeat",
        BaseFilter.PositiveNumber,
        r"(?:-?\d+)|none",
        r"(?:-?\d+)|none",
    )
    async def watermark(self, url, x, y, alpha, w_ratio=False, h_ratio=False):
        """Filter entry point: fetch the watermark (storage first, then
        loader), cache it, and composite it via on_image_ready().

        :param url: location of the watermark image
        :param x: horizontal position -- pixels, '<n>p', 'center' or 'repeat'
        :param y: vertical position -- same forms as x
        :param alpha: watermark transparency applied via _alpha
        :param w_ratio: watermark width as a percentage of image width, or 'none'
        :param h_ratio: watermark height as a percentage of image height, or 'none'
        :raises tornado.web.HTTPError: 400 when the watermark fails to load,
            500 on any other error
        """
        self.url = url
        self.x = x
        self.y = y
        self.alpha = alpha
        # 'none' (or a falsy value) disables ratio sizing for that axis
        self.w_ratio = (
            float(w_ratio) / 100.0 if w_ratio and w_ratio != "none" else False
        )
        self.h_ratio = (
            float(h_ratio) / 100.0 if h_ratio and h_ratio != "none" else False
        )
        self.watermark_engine = self.context.modules.engine.__class__(self.context)
        self.storage = self.context.modules.storage
        try:
            # fast path: watermark already cached in storage
            buffer = await self.storage.get(self.url)
            if buffer is not None:
                return self.on_image_ready(buffer)
            result = await self.context.modules.loader.load(self.context, self.url)
            if isinstance(result, LoaderResult) and not result.successful:
                logger.warning(
                    "bad watermark result error=%s metadata=%s",
                    result.error,
                    result.metadata,
                )
                raise tornado.web.HTTPError(400)
            if isinstance(result, LoaderResult):
                buffer = result.buffer
            else:
                buffer = result
            # cache the freshly loaded watermark for subsequent requests
            await self.storage.put(self.url, buffer)
            await self.storage.put_crypto(self.url)
            self.on_image_ready(buffer)
        except Exception as error:
            # let deliberate HTTP errors (e.g. the 400 above) pass through
            if isinstance(error, tornado.web.HTTPError):
                raise error
            logger.warning("bad watermark")
            raise tornado.web.HTTPError(500)
| {
"content_hash": "79cf9003ea1d6cb22c04a3e172552e44",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 96,
"avg_line_length": 37.54696132596685,
"alnum_prop": 0.5286933490288405,
"repo_name": "scorphus/thumbor",
"id": "eb566efdbf6e96a9d602ef19efcdec06751f8e56",
"size": "7080",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thumbor/filters/watermark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58654"
},
{
"name": "JavaScript",
"bytes": "2514"
},
{
"name": "Makefile",
"bytes": "11518"
},
{
"name": "Python",
"bytes": "604965"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
} |
__author__ = 'Seqian Wang'
# A class for each scan session
class ScanSession:
    """Plain record describing one imaging scan session and its file locations."""

    def __init__(self, study, rid, scan_type, scan_date, scan_time,
                 s_identifier, i_identifier, download_folder, raw_folder, file_type, moved=0):
        self.study = study
        self.rid = rid
        self.scan_type = scan_type
        self.scan_date = scan_date
        self.scan_time = scan_time
        self.s_identifier = s_identifier
        self.i_identifier = i_identifier
        self.download_folder = download_folder
        self.raw_folder = raw_folder
        self.file_type = file_type
        self.moved = moved

    def printObject(self):
        """Print every field of this session on one comma-separated line."""
        fields = (self.study, self.rid, self.scan_type, self.scan_date,
                  self.scan_time, self.s_identifier, self.i_identifier,
                  self.download_folder, self.raw_folder, self.file_type,
                  self.moved)
        print('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {10}'.format(*fields))

    def printScanType(self):
        """Print only the scan type."""
        print('{0}'.format(self.scan_type))

    def sqlInsert(self):
        """Quoted value list for an SQL INSERT.

        NOTE(review): the value order puts file_type before the two folder
        columns -- the INSERT column list must match this order.
        """
        values = (self.study, self.rid, self.scan_type, self.scan_date,
                  self.scan_time, self.s_identifier, self.i_identifier,
                  self.file_type, self.download_folder, self.raw_folder,
                  self.moved)
        return ("'%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', %d" % values)

    def sqlUniqueValues(self):
        """Quoted values for the fields that uniquely identify a session."""
        unique = (self.study, self.rid, self.scan_type,
                  self.s_identifier, self.i_identifier)
        return ("'%s', '%s', '%s', '%s', '%s'" % unique)

    def sqlUniqueFields(self):
        """Column names matching sqlUniqueValues(), as a quoted list."""
        return "'STUDY', 'RID', 'SCAN_TYPE', 'S_IDENTIFIER', 'I_IDENTIFIER'"

    def getValuesDict(self):
        """All fields as a plain dict keyed by lowercase column name."""
        return {'study': self.study, 'rid': self.rid,
                'scan_type': self.scan_type,
                'scan_date': self.scan_date,
                'scan_time': self.scan_time,
                's_identifier': self.s_identifier,
                'i_identifier': self.i_identifier,
                'download_folder': self.download_folder,
                'raw_folder': self.raw_folder,
                'file_type': self.file_type,
                'moved': self.moved}
| {
"content_hash": "65bf90de788a14e94bcc84cfadab4b58",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 100,
"avg_line_length": 41.13461538461539,
"alnum_prop": 0.5493221131369799,
"repo_name": "sulantha2006/Processing_Pipeline",
"id": "56da6ddb71720628e086f89a5bf52ccee47be6f0",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Recursor/ScanSession.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "M",
"bytes": "713"
},
{
"name": "MATLAB",
"bytes": "13833"
},
{
"name": "Perl",
"bytes": "141400"
},
{
"name": "Python",
"bytes": "365424"
},
{
"name": "Shell",
"bytes": "201923"
}
],
"symlink_target": ""
} |
'''
python word2vec_helpers.py input_file output_model_file output_vector_file
'''
# import modules & set up logging
import os
import sys
import logging
import multiprocessing
import time
import json
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
def output_vocab(vocab):
    """Print every word in *vocab*, one per line.

    Args:
        vocab: mapping of word -> vocabulary entry; only the keys are used.
    """
    # Iterate keys directly -- the previous .items() loop fetched values
    # that were never used.
    for word in vocab:
        print(word)
def embedding_sentences(sentences, embedding_size = 128, window = 5, min_count = 5, file_to_load = None, file_to_save = None):
    '''
    Turn tokenised sentences into per-sentence lists of word vectors.

    embedding_size : dimensionality of the word embeddings
    window : context window size
    min_count : words occurring fewer than min_count times are dropped
    file_to_load / file_to_save : optional paths to reuse or persist the model

    Returns one list of vectors per input sentence; out-of-vocabulary words
    map to an all-zero vector.
    '''
    if file_to_load is not None:
        # reuse a previously trained model instead of retraining
        w2vModel = Word2Vec.load(file_to_load)
    else:
        # NOTE(review): `size=`, `w2vModel[word]` and `wv.vocab` below are the
        # pre-4.0 gensim API -- confirm the pinned gensim version.
        w2vModel = Word2Vec(sentences, size = embedding_size, window = window, min_count = min_count, workers = multiprocessing.cpu_count())
        # only persist a freshly trained model
        if file_to_save is not None:
            w2vModel.save(file_to_save)
    all_vectors = []
    embeddingDim = w2vModel.vector_size
    # embedding dimensionality
    embeddingUnknown = [0 for i in range(embeddingDim)]
    for sentence in sentences:
        this_vector = []
        for word in sentence:
            if word in w2vModel.wv.vocab:
                this_vector.append(w2vModel[word])
            else:
                # unknown word -> zero vector
                this_vector.append(embeddingUnknown)
        all_vectors.append(this_vector)
    return all_vectors
def generate_word2vec_files(input_file, output_model_file, output_vector_file, size = 128, window = 5, min_count = 5):
    """Train word2vec on *input_file* (one sentence per line) and save it.

    Writes the binary gensim model to *output_model_file*, the plain-text
    vectors to *output_vector_file*, and reports the elapsed wall time.
    """
    start_time = time.time()
    corpus = LineSentence(input_file)
    # trim unneeded model memory = use(much) less RAM
    # model.init_sims(replace=True)
    model = Word2Vec(corpus, size = size, window = window,
                     min_count = min_count, workers = multiprocessing.cpu_count())
    model.save(output_model_file)
    model.wv.save_word2vec_format(output_vector_file, binary=False)
    end_time = time.time()
    print("used time : %d s" % (end_time - start_time))
def run_main():
    """Command-line entry point.

    Usage: python word2vec_helpers.py input_file output_model_file output_vector_file
    Exits with status 1 (after printing the module usage string) when fewer
    than three arguments are supplied.
    """
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    logger.info("running %s", ' '.join(sys.argv))
    # check and process input arguments
    if len(sys.argv) < 4:
        # Fixed: this was a Python 2 `print` statement -- a SyntaxError under
        # Python 3, while the rest of the file uses the print() function.
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    input_file, output_model_file, output_vector_file = sys.argv[1:4]
    generate_word2vec_files(input_file, output_model_file, output_vector_file)
def test():
    """Quick smoke test: embed two tiny sentences and print the vectors."""
    sample = [['first', 'sentence'], ['second', 'sentence']]
    print(embedding_sentences(sample, embedding_size = 4, min_count = 1))
| {
"content_hash": "1eaca464e7a0899cebbec3b1817c784b",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 140,
"avg_line_length": 34.11538461538461,
"alnum_prop": 0.6388575723412251,
"repo_name": "paradise6/DetectMaliciousURL",
"id": "183bd1b15680141f988c23517270e12c69dfab8f",
"size": "2730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "model/word2vec_helpers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51296"
}
],
"symlink_target": ""
} |
import simplejson
import save_csv
import ratelimit
import time
import raw
import urllib
import os
import oauth_req
def getStream(raw_urls_filename, dupes_filename, author_urls_dir, raw_status_filename = ''):
    """Sample the Twitter stream until one tweet carrying URL entities arrives.

    Expanded URLs already recorded in *raw_urls_filename* are appended to
    *dupes_filename*; new ones go to a per-author CSV under *author_urls_dir*
    and the updated URL set is saved back to *raw_urls_filename*.
    Python 2 only (``except Exception, e`` and print statements).
    """
    from tweepy.streaming import StreamListener
    from tweepy import OAuthHandler
    from tweepy import Stream
    if author_urls_dir:
        # normalise/create the output directory path
        author_urls_dir = save_csv.checkDir(author_urls_dir)
    # one set of OAuth credentials; kunci/rahsia are used as the
    # access token / secret below (Malay for "key"/"secret")
    key = ratelimit.getKeys(1)
    consumer_key, consumer_secret, kunci, rahsia = ratelimit.getKeyData(key)
    current_urls = dict()
    # URLs seen in previous runs (loaded from disk) vs. this run (in memory)
    seen_urls = raw.getRawSet(raw_urls_filename, 'url')
    newly_seen_urls = set()
    class StdOutListener(StreamListener):
        """A listener that handles tweets received from the stream.
        Records author/URL pairs and stops after the first URL-bearing tweet.
        """
        def on_status(self, status):
            try:
                # if tweet has a URL
                if status.entities['urls']:
                    # process status
                    #storeStatus(status, raw_status_filename)
                    ##### BEGIN storing author - URLs #####
                    author_id = status.author.id_str
                    for url in status.entities['urls']:
                        # resolve shorteners to the final long URL
                        the_url = longUntiny(url['expanded_url'])
                        # check if url is a duplicate
                        if (the_url in seen_urls) or (the_url in newly_seen_urls):
                            # save duplicate URL into file
                            save_csv.appendCSV([[the_url, status.id_str]], dupes_filename, ['url', 'tweet_id'])
                        else:
                            # save URL into author-URL file
                            save_csv.appendCSV([[the_url, status.id_str]], "%s%s.csv" % (author_urls_dir, author_id), ['url', 'tweet_id'])
                            # store tweet off of Twitter stream
                            search_tweet = [the_url, status.id_str, status.text, author_id]
                            # store the_url and current tweet in dict
                            if the_url in current_urls:
                                current_urls[the_url].append(search_tweet)
                            else:
                                current_urls[the_url] = [search_tweet]
                            newly_seen_urls.add(the_url)
                    ##### END storing author - URLs #####
                    # stop stream (returning False disconnects tweepy)
                    return False
                else:
                    # continue stream
                    return True
            except Exception, e:
                # Catch any unicode errors while printing to console
                # and just ignore them to avoid breaking application.
                pass
        def on_error(self, status):
            print status
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(kunci, rahsia)
    stream = Stream(auth, l)
    # blocks until on_status returns False
    stream.sample()
    # save seen current_urls into raw file
    raw.saveRawSet(newly_seen_urls, raw_urls_filename, 'url')
def longUntiny(the_url):
    """Resolve a shortened URL to its original long form.

    Tries the longurl.org client first; if the URL is not expandable there,
    falls back to the untiny.info JSON API, retrying until it answers or
    reports a non-retryable error. Returns the input unchanged when no
    expansion is found. Python 2 urllib.
    """
    import longurl
    expander = longurl.LongURL()
    # truthy result means longurl knows how to expand this URL
    the_url_lu = expander.expandable(the_url)
    if the_url_lu:
        #print "longurl: %s" % expander.expand(the_url, qurl = the_url_lu)
        #lu_ok_ctr = lu_ok_ctr + 1
        the_url = expander.expand(the_url, qurl = the_url_lu)
    else:
        #print "------- ERROR %s" % the_url
        #print "------------- try untinyme..."
        redo = True
        while redo:
            # response body is a single JSON line
            conn = urllib.urlopen("http://untiny.info/api/1.0/extract?url=%s&format=json" % urllib.quote_plus(the_url))
            ljson2 = simplejson.loads(conn.readline())
            if 'org_url' in ljson2:
                #print "------------- untinyme result: %s" % ljson2['org_url']
                #lu_ok_ctr = lu_ok_ctr + 1
                the_url = ljson2['org_url']
                redo = False
            else:
                # error code 2 presumably means "transient, retry"; any other
                # code gives up and keeps the URL as-is -- TODO confirm
                # against the untiny API docs
                if ljson2['error'][1] != 2:
                    #print "------------- ERROR %s: %s" % (ljson2['error'][1], the_url)
                    #print "------------- final result: %s" % the_url
                    #lu_no_ctr = lu_no_ctr + 1
                    redo = False
            '''else:
            "------------- ERROR Connection failed, retry..."'''
            conn.close()
    return the_url
def storeStatus(status, raw_status_filename):
    """Flatten one tweepy status into a CSV row appended to *raw_status_filename*.

    Missing relations (not a retweet, no mentions, not a reply) are recorded
    as the string '0'.
    """
    headers = ['tweet_id', 'tweet_text', 'timestamp', 'author_id', 'rt_status_tweet_id', 'rt_status_user_id', 'ent_mentions_user_id',
               'in_reply_tweet_id', 'in_reply_user_id']
    # fix timestamp!  (created_at is written out as-is for now)
    if status.retweeted_status:
        rt_status_tweet_id = status.retweeted_status.id_str
        rt_status_user_id = status.retweeted_status.user.id_str
    else:
        rt_status_tweet_id = '0'
        rt_status_user_id = '0'
    mentions = status.entities['user_mentions']
    if mentions:
        # comma-joined list of all mentioned user ids
        ent_mentions_user_id = ','.join(m['id_str'] for m in mentions)
    else:
        ent_mentions_user_id = '0'
    in_reply_tweet_id = status.in_reply_to_status_id_str or '0'
    in_reply_user_id = status.in_reply_to_user_id_str or '0'
    data_row = [status.id_str, status.text, status.created_at, status.author.id_str,
                rt_status_tweet_id, rt_status_user_id, ent_mentions_user_id,
                in_reply_tweet_id, in_reply_user_id]
    save_csv.appendCSV([data_row], raw_status_filename, headers)
def getTimelineURLs(raw_urls_filename, author_urls_dir, limit):
    """Backfill up to *limit* URLs per author from each author's recent
    timeline (last day only), then store that author's friends list.

    Authors are the per-author CSVs in *author_urls_dir* that do not yet have
    a matching file in its ``timeline`` subfolder. Python 2 only (prints,
    bare except).
    """
    import csv
    # timeline rows (tweet text) can be very large
    csv.field_size_limit(50000000)
    import cleanup
    from datetime import datetime
    if author_urls_dir:
        author_urls_dir = save_csv.checkDir(author_urls_dir)
    # loop through author_urls_dir
    authors_list = list()
    for the_id in os.listdir(author_urls_dir):
        # only authors whose timeline has not been fetched yet
        if the_id.endswith('.csv') and os.path.isfile("%stimeline\\%s" % (author_urls_dir, the_id)) == False:
            # NOTE(review): rstrip('.csv') strips any trailing '.', 'c', 's',
            # 'v' characters, not just the extension -- ids ending in those
            # characters would be mangled
            authors_list.append(the_id.rstrip('.csv'))
    seen_overall_urls = set(raw.getRawList(raw_urls_filename, 'url'))
    for author_id in authors_list:
        print "getting timeline URLs for author %s at %s" % (author_id, time.strftime("%H:%M:%S"))
        # look for author's presaved list of URLs
        current_urls_dict = raw.getRawDict("%s%s.csv" % (author_urls_dir, author_id), 'url', 'tweet_id')
        current_urls_set = set()
        arrays_to_write = [['url', 'tweet_id']]
        for the_url in current_urls_dict:
            current_urls_set.add(the_url)
            #print current_urls_set
            #print "before: %s" % current_urls_set
            arrays_to_write.append([the_url, current_urls_dict[the_url]])
        # get author_id's user timeline
        key = ratelimit.getKeys(1, 'timeline')
        consumer_key, consumer_secret, kunci, rahsia = ratelimit.getKeyData(key)
        url = "https://api.twitter.com/1.1/statuses/user_timeline.json?user_id=%s&count=200?trim_user=true" % (author_id)
        api_response = oauth_req.OauthReq(url, consumer_key, consumer_secret, kunci, rahsia)
        if api_response[0]['status'] == '200' and api_response[1]:
            json_response = simplejson.loads(api_response[1])
            for tweet in json_response:
                if len(current_urls_set) < limit:
                    # tweet age; only tweets from the last day are considered
                    tdelta = datetime.now() - datetime.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
                    if tdelta.days < 1:
                        if 'urls' in tweet['entities'] and tweet['entities']['urls']:
                            for url_dict in tweet['entities']['urls']:
                                # check again in case there are more than enough URLs to add
                                if len(current_urls_set) < limit:
                                    try:
                                        # expand each URL found
                                        ent_url = longUntiny(url_dict['expanded_url']).decode("utf-8")
                                        if (ent_url not in seen_overall_urls) and (ent_url not in current_urls_set):
                                            # store each tweet
                                            #current_urls.append([ent_url]) # internal memory of tweets
                                            arrays_to_write.append([ent_url, tweet['id_str']]) # external memory of tweets
                                            current_urls_set.add(ent_url) # internal memory of tweets
                                            #print "after: %s" % current_urls_set
                                    except:
                                        # bare except: any decode/expansion failure skips this URL
                                        print 'non-ascii chars'
                                else:
                                    #print "limit reached!"
                                    break
                    else:
                        #print "over 1 day!"
                        break
                else:
                    break
        elif api_response[0]['status'] == '429':
            # Twitter rate limit hit: back off briefly, then move on
            sleep_time = 30
            print "rate limited at %s! neet to wait %d s" % (time.strftime("%H:%M:%S"), sleep_time)
            time.sleep(sleep_time)
            print "woken up at %s!" % time.strftime("%H:%M:%S")
        else:
            print "error getting user %s's timeline: %s" % (author_id, api_response[0]['status'])
        if len(arrays_to_write) > 1:
            # store author's user timeline's URLs
            save_csv.SaveCSV(arrays_to_write, "%stimeline\\%s.csv" % (author_urls_dir, author_id))
            raw.saveRawSet(current_urls_set, raw_urls_filename, 'url')
        # store author's friends list
        storeFriends(author_id, "%sfriends" % author_urls_dir)
    # do cleanup - for some reason pattern matching above still is letting dupes in
    # cleanup all_urls file
    cleanup.cleanupOneCol(raw_urls_filename, 'url')
#'''
def storeFriends(auth_id, raw_filename_friends):
    """Fetch and save the complete friends (followee) id list for *auth_id*.

    Skips the author entirely when ``<raw_filename_friends>\\<auth_id>.csv``
    already exists. Paginates through friends/ids with cursors until the API
    reports no next page, then writes the collected ids to that CSV.
    """
    # look for auth's total friends
    if os.path.isfile("%s\\%s.csv" % (raw_filename_friends, auth_id)) == False:
        print "getting friends for author %s at %s" % (auth_id, time.strftime("%H:%M:%S"))
        # haven't seen this auth yet, so grab friends list
        current_friends = set()
        # "-1" asks the API for the first page
        cursor = "-1"
        exit_loop = False
        # paginating loop
        while exit_loop == False:
            # only proceed if there's no rate limiting
            key = ratelimit.getKeys(1, f = 'friends')
            consumer_key, consumer_secret, kunci, rahsia = ratelimit.getKeyData(key)
            # get followers for auth_id
            url = 'https://api.twitter.com/1.1/friends/ids.json?user_id=%s&cursor=%s&stringify_ids=true' % (auth_id, cursor)
            api_response = oauth_req.OauthReq(url, consumer_key, consumer_secret, kunci, rahsia)
            if api_response[0]['status'] == '200' and api_response[1]:
                json_response = simplejson.loads(api_response[1])
                if json_response['ids']:
                    if current_friends:
                        # if paginating
                        current_friends.update(set(json_response['ids']))
                    else:
                        # on 1st block, no paginating yet
                        current_friends = set(json_response['ids'])
                    if json_response['next_cursor_str'] != '0':
                        # if paginating, change cursor to next page
                        cursor = json_response['next_cursor_str']
                    else:
                        # no more IDs, break loop
                        exit_loop = True
                else:
                    # no followers
                    exit_loop = True
            else:
                # error, or not authorized (protected)
                exit_loop = True
        # store followers for auth_id
        raw.saveRawSet(current_friends, "%s\\%s.csv" % (raw_filename_friends, auth_id), "friends")#'''
def storeFollowers(auth_id, raw_filename_friends):
    """Fetch and save up to five pages of follower ids for *auth_id*.

    Same skip-if-CSV-exists and cursor pagination scheme as storeFriends,
    but uses followers/ids with count=5000 and caps pagination at 5 requests
    via the ``loop`` counter, so at most ~25k follower ids are stored.
    """
    # look for auth's total friends
    if os.path.isfile("%s\\%s.csv" % (raw_filename_friends, auth_id)) == False:
        print "getting followers for author %s at %s" % (auth_id, time.strftime("%H:%M:%S"))
        # haven't seen this auth yet, so grab friends list
        current_friends = set()
        # "-1" asks the API for the first page
        cursor = "-1"
        #exit_loop = False
        loop = 0
        # paginating loop
        #while exit_loop == False:
        while loop < 5:
            # only proceed if there's no rate limiting
            key = ratelimit.getKeys(1, f = 'followers')
            consumer_key, consumer_secret, kunci, rahsia = ratelimit.getKeyData(key)
            # get followers for auth_id
            url = 'https://api.twitter.com/1.1/followers/ids.json?user_id=%s&cursor=%s&stringify_ids=true&count=5000' % (auth_id, cursor)
            api_response = oauth_req.OauthReq(url, consumer_key, consumer_secret, kunci, rahsia)
            if api_response[0]['status'] == '200' and api_response[1]:
                json_response = simplejson.loads(api_response[1])
                if json_response['ids']:
                    if current_friends:
                        # if paginating
                        current_friends.update(set(json_response['ids']))
                    else:
                        # on 1st block, no paginating yet
                        current_friends = set(json_response['ids'])
                    if json_response['next_cursor_str'] != '0':
                        # if paginating, change cursor to next page
                        cursor = json_response['next_cursor_str']
                        loop = loop + 1
                    else:
                        # no more IDs, break loop
                        #exit_loop = True
                        loop = 5
                else:
                    # no followers
                    #exit_loop = True
                    loop = 5
            else:
                # error, or not authorized (protected)
                #exit_loop = True
                loop = 5
        # store followers for auth_id
        raw.saveRawSet(current_friends, "%s\\%s.csv" % (raw_filename_friends, auth_id), "followers")#'''
| {
"content_hash": "cd964512fd98d31aa5a33fd6e0fec829",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 138,
"avg_line_length": 39.06217616580311,
"alnum_prop": 0.5052394216739621,
"repo_name": "coolster1/dark-rt-toolkit",
"id": "915e620d42f0a5ad17d09ccfce47bcf1b0f04ab2",
"size": "15078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tk1-random/b_get_stream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "214022"
}
],
"symlink_target": ""
} |
import pprint
from celery import Celery
from .camera import CameraFactory
from .state import State
def noop(x):
    """Do nothing with *x*; placeholder callback that returns None."""
    return None
class TaskMonitor(object):
    """Consume Celery task events from a broker and feed them to a camera.

    *camera* is a dotted path resolved via CameraFactory; when *verbose* is
    true, every handler is wrapped so each raw event is pretty-printed
    before being dispatched to the State object.
    """

    def __init__(self, broker=None, camera='celery_cloudwatch.PrintCamera',
                 verbose=False, config=None):
        self.broker = broker
        self.camera = camera
        self.verbose = verbose
        self.config = config

    def run(self):
        """Install the camera and capture task events until interrupted."""
        app = Celery(broker=self.broker)
        state = State()
        cam = CameraFactory(self.camera).camera(state, self.config)
        # Map each task event name onto the matching State transition,
        # optionally wrapped for verbose printing.
        handlers = {
            name: self.proxy_event(name, handler)
            for name, handler in (
                ('task-sent', state.task_sent),
                ('task-started', state.task_started),
                ('task-succeeded', state.task_succeeded),
                ('task-failed', state.task_failed),
            )
        }
        with app.connection() as connection:
            cam.install()
            receiver = app.events.Receiver(connection, handlers=handlers)
            try:
                receiver.capture(limit=None, timeout=None, wakeup=False)
            except KeyboardInterrupt:
                raise SystemExit
            finally:
                cam.cancel()

    def proxy_event(self, event_name, fn):
        """Return *fn* unchanged, or a wrapper that prints the event first."""
        if not self.verbose:
            return fn

        def verbose_fn(event):
            print('[{}] - {}'.format(event_name, pprint.pformat(event)))
            return fn(event)

        return verbose_fn
| {
"content_hash": "0e9a3718447dc282a635f5f155b047ee",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 91,
"avg_line_length": 30.725490196078432,
"alnum_prop": 0.5730695596681558,
"repo_name": "3stack-software/celery-cloudwatch",
"id": "7c83b0fdb9a9dd3dd0e5e6291eca0e4fab68943c",
"size": "1567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celery_cloudwatch/task_monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "36463"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
import pickle
import sqlalchemy as tsa
from sqlalchemy import ARRAY
from sqlalchemy import bindparam
from sqlalchemy import BLANK_SCHEMA
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import column
from sqlalchemy import ColumnDefault
from sqlalchemy import desc
from sqlalchemy import Enum
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import TypeDecorator
from sqlalchemy import types as sqltypes
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy.engine import default
from sqlalchemy.schema import AddConstraint
from sqlalchemy.schema import CreateIndex
from sqlalchemy.schema import DefaultClause
from sqlalchemy.schema import DropIndex
from sqlalchemy.sql import naming
from sqlalchemy.sql import operators
from sqlalchemy.sql.elements import _NONE_NAME
from sqlalchemy.sql.elements import literal_column
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import emits_warning
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_false
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
class MetaDataTest(fixtures.TestBase, ComparesTables):
def test_metadata_contains(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("x", Integer), schema="foo")
t3 = Table("t2", MetaData(), Column("x", Integer))
t4 = Table("t1", MetaData(), Column("x", Integer), schema="foo")
assert "t1" in metadata
assert "foo.t2" in metadata
assert "t2" not in metadata
assert "foo.t1" not in metadata
assert t1 in metadata
assert t2 in metadata
assert t3 not in metadata
assert t4 not in metadata
def test_uninitialized_column_copy(self):
for col in [
Column("foo", String(), nullable=False),
Column("baz", String(), unique=True),
Column(Integer(), primary_key=True),
Column(
"bar",
Integer(),
Sequence("foo_seq"),
primary_key=True,
key="bar",
),
Column(Integer(), ForeignKey("bat.blah"), doc="this is a col"),
Column(
"bar",
Integer(),
ForeignKey("bat.blah"),
primary_key=True,
key="bar",
),
Column("bar", Integer(), info={"foo": "bar"}),
]:
c2 = col._copy()
for attr in (
"name",
"type",
"nullable",
"primary_key",
"key",
"unique",
"info",
"doc",
):
eq_(getattr(col, attr), getattr(c2, attr))
eq_(len(col.foreign_keys), len(c2.foreign_keys))
if col.default:
eq_(c2.default.name, "foo_seq")
for a1, a2 in zip(col.foreign_keys, c2.foreign_keys):
assert a1 is not a2
eq_(a2._colspec, "bat.blah")
def test_col_subclass_copy(self):
class MyColumn(schema.Column):
def __init__(self, *args, **kw):
self.widget = kw.pop("widget", None)
super(MyColumn, self).__init__(*args, **kw)
def _copy(self, *arg, **kw):
c = super(MyColumn, self)._copy(*arg, **kw)
c.widget = self.widget
return c
c1 = MyColumn("foo", Integer, widget="x")
c2 = c1._copy()
assert isinstance(c2, MyColumn)
eq_(c2.widget, "x")
def test_uninitialized_column_copy_events(self):
msgs = []
def write(c, t):
msgs.append("attach %s.%s" % (t.name, c.name))
c1 = Column("foo", String())
m = MetaData()
for i in range(3):
cx = c1._copy()
# as of 0.7, these events no longer copy. its expected
# that listeners will be re-established from the
# natural construction of things.
cx._on_table_attach(write)
Table("foo%d" % i, m, cx)
eq_(msgs, ["attach foo0.foo", "attach foo1.foo", "attach foo2.foo"])
def test_schema_collection_add(self):
metadata = MetaData()
Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
Table("t3", metadata, Column("x", Integer))
eq_(metadata._schemas, set(["foo", "bar"]))
eq_(len(metadata.tables), 3)
def test_schema_collection_remove(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
t3 = Table("t3", metadata, Column("x", Integer), schema="bar")
metadata.remove(t3)
eq_(metadata._schemas, set(["foo", "bar"]))
eq_(len(metadata.tables), 2)
metadata.remove(t1)
eq_(metadata._schemas, set(["bar"]))
eq_(len(metadata.tables), 1)
def test_schema_collection_remove_all(self):
metadata = MetaData()
Table("t1", metadata, Column("x", Integer), schema="foo")
Table("t2", metadata, Column("x", Integer), schema="bar")
metadata.clear()
eq_(metadata._schemas, set())
eq_(len(metadata.tables), 0)
def test_metadata_tables_immutable(self):
# this use case was added due to #1917.
metadata = MetaData()
Table("t1", metadata, Column("x", Integer))
assert "t1" in metadata.tables
assert_raises(TypeError, lambda: metadata.tables.pop("t1"))
@testing.provide_metadata
def test_dupe_tables(self):
metadata = self.metadata
Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
)
metadata.create_all(testing.db)
Table("table1", metadata, autoload_with=testing.db)
def go():
Table(
"table1",
metadata,
Column("col1", Integer, primary_key=True),
Column("col2", String(20)),
)
assert_raises_message(
tsa.exc.InvalidRequestError,
"Table 'table1' is already defined for this "
"MetaData instance. Specify 'extend_existing=True' "
"to redefine options and columns on an existing "
"Table object.",
go,
)
def test_fk_copy(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
kw = dict(
onupdate="X",
ondelete="Y",
use_alter=True,
name="f1",
deferrable="Z",
initially="Q",
link_to_name=True,
)
fk1 = ForeignKey(c1, **kw)
fk2 = ForeignKeyConstraint((c1,), (c2,), **kw)
t1.append_constraint(fk2)
fk1c = fk1._copy()
fk2c = fk2._copy()
for k in kw:
eq_(getattr(fk1c, k), kw[k])
eq_(getattr(fk2c, k), kw[k])
def test_check_constraint_copy(self):
def r(x):
return x
c = CheckConstraint(
"foo bar",
name="name",
initially=True,
deferrable=True,
_create_rule=r,
)
c2 = c._copy()
eq_(c2.name, "name")
eq_(str(c2.sqltext), "foo bar")
eq_(c2.initially, True)
eq_(c2.deferrable, True)
assert c2._create_rule is r
def test_col_replace_w_constraint(self):
m = MetaData()
a = Table("a", m, Column("id", Integer, primary_key=True))
aid = Column("a_id", ForeignKey("a.id"))
b = Table("b", m, aid)
b.append_column(aid)
assert b.c.a_id.references(a.c.id)
eq_(len(b.constraints), 2)
def test_fk_construct(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
fk1 = ForeignKeyConstraint(("foo",), ("bar",), table=t1)
assert fk1 in t1.constraints
def test_fk_constraint_col_collection_w_table(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
m = MetaData()
t1 = Table("t", m, c1, c2)
fk1 = ForeignKeyConstraint(("foo",), ("bar",), table=t1)
eq_(dict(fk1.columns), {"foo": c1})
def test_fk_constraint_col_collection_no_table(self):
fk1 = ForeignKeyConstraint(("foo", "bat"), ("bar", "hoho"))
eq_(dict(fk1.columns), {})
eq_(fk1.column_keys, ["foo", "bat"])
eq_(fk1._col_description, "foo, bat")
eq_(fk1._elements, {"foo": fk1.elements[0], "bat": fk1.elements[1]})
def test_fk_constraint_col_collection_no_table_real_cols(self):
c1 = Column("foo", Integer)
c2 = Column("bar", Integer)
fk1 = ForeignKeyConstraint((c1,), (c2,))
eq_(dict(fk1.columns), {})
eq_(fk1.column_keys, ["foo"])
eq_(fk1._col_description, "foo")
eq_(fk1._elements, {"foo": fk1.elements[0]})
def test_fk_constraint_col_collection_added_to_table(self):
c1 = Column("foo", Integer)
m = MetaData()
fk1 = ForeignKeyConstraint(("foo",), ("bar",))
Table("t", m, c1, fk1)
eq_(dict(fk1.columns), {"foo": c1})
eq_(fk1._elements, {"foo": fk1.elements[0]})
def test_fk_constraint_col_collection_via_fk(self):
fk = ForeignKey("bar")
c1 = Column("foo", Integer, fk)
m = MetaData()
t1 = Table("t", m, c1)
fk1 = fk.constraint
eq_(fk1.column_keys, ["foo"])
assert fk1 in t1.constraints
eq_(fk1.column_keys, ["foo"])
eq_(dict(fk1.columns), {"foo": c1})
eq_(fk1._elements, {"foo": fk})
def test_fk_no_such_parent_col_error(self):
meta = MetaData()
a = Table("a", meta, Column("a", Integer))
Table("b", meta, Column("b", Integer))
def go():
a.append_constraint(ForeignKeyConstraint(["x"], ["b.b"]))
assert_raises_message(
exc.ArgumentError,
"Can't create ForeignKeyConstraint on "
"table 'a': no column named 'x' is present.",
go,
)
def test_fk_given_non_col(self):
not_a_col = bindparam("x")
assert_raises_message(
exc.ArgumentError,
"String column name or Column object for DDL foreign "
"key constraint expected, got .*Bind",
ForeignKey,
not_a_col,
)
def test_fk_given_non_col_clauseelem(self):
class Foo(object):
def __clause_element__(self):
return bindparam("x")
assert_raises_message(
exc.ArgumentError,
"String column name or Column object for DDL foreign "
"key constraint expected, got .*Foo",
ForeignKey,
Foo(),
)
def test_fk_given_col_non_table(self):
t = Table("t", MetaData(), Column("x", Integer))
xa = t.alias().c.x
assert_raises_message(
exc.ArgumentError,
"ForeignKey received Column not bound to a Table, got: .*Alias",
ForeignKey,
xa,
)
def test_fk_given_col_non_table_clauseelem(self):
t = Table("t", MetaData(), Column("x", Integer))
class Foo(object):
def __clause_element__(self):
return t.alias().c.x
assert_raises_message(
exc.ArgumentError,
"ForeignKey received Column not bound to a Table, got: .*Alias",
ForeignKey,
Foo(),
)
def test_fk_no_such_target_col_error_upfront(self):
meta = MetaData()
a = Table("a", meta, Column("a", Integer))
Table("b", meta, Column("b", Integer))
a.append_constraint(ForeignKeyConstraint(["a"], ["b.x"]))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey 'b.x' on "
"table 'a': table 'b' has no column named 'x'",
getattr,
list(a.foreign_keys)[0],
"column",
)
def test_fk_no_such_target_col_error_delayed(self):
meta = MetaData()
a = Table("a", meta, Column("a", Integer))
a.append_constraint(ForeignKeyConstraint(["a"], ["b.x"]))
Table("b", meta, Column("b", Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey 'b.x' on "
"table 'a': table 'b' has no column named 'x'",
getattr,
list(a.foreign_keys)[0],
"column",
)
def test_fk_mismatched_local_remote_cols(self):
assert_raises_message(
exc.ArgumentError,
"ForeignKeyConstraint number of constrained columns must "
"match the number of referenced columns.",
ForeignKeyConstraint,
["a"],
["b.a", "b.b"],
)
assert_raises_message(
exc.ArgumentError,
"ForeignKeyConstraint number of constrained columns "
"must match the number of referenced columns.",
ForeignKeyConstraint,
["a", "b"],
["b.a"],
)
assert_raises_message(
exc.ArgumentError,
"ForeignKeyConstraint with duplicate source column "
"references are not supported.",
ForeignKeyConstraint,
["a", "a"],
["b.a", "b.b"],
)
def test_pickle_metadata_sequence_restated(self):
m1 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, Sequence("x_seq")),
)
m2 = pickle.loads(pickle.dumps(m1))
s2 = Sequence("x_seq")
t2 = Table(
"a",
m2,
Column("id", Integer, primary_key=True),
Column("x", Integer, s2),
extend_existing=True,
)
assert m2._sequences["x_seq"] is t2.c.x.default
assert m2._sequences["x_seq"] is s2
def test_sequence_restated_replaced(self):
"""Test restatement of Sequence replaces."""
m1 = MetaData()
s1 = Sequence("x_seq")
t = Table("a", m1, Column("x", Integer, s1))
assert m1._sequences["x_seq"] is s1
s2 = Sequence("x_seq")
Table("a", m1, Column("x", Integer, s2), extend_existing=True)
assert t.c.x.default is s2
assert m1._sequences["x_seq"] is s2
def test_sequence_attach_to_table(self):
m1 = MetaData()
s1 = Sequence("s")
Table("a", m1, Column("x", Integer, s1))
assert s1.metadata is m1
def test_sequence_attach_to_existing_table(self):
m1 = MetaData()
s1 = Sequence("s")
t = Table("a", m1, Column("x", Integer))
t.c.x._init_items(s1)
assert s1.metadata is m1
def test_pickle_metadata_sequence_implicit(self):
m1 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, Sequence("x_seq")),
)
m2 = pickle.loads(pickle.dumps(m1))
t2 = Table("a", m2, extend_existing=True)
eq_(m2._sequences, {"x_seq": t2.c.x.default})
def test_pickle_metadata_schema(self):
m1 = MetaData()
Table(
"a",
m1,
Column("id", Integer, primary_key=True),
Column("x", Integer, Sequence("x_seq")),
schema="y",
)
m2 = pickle.loads(pickle.dumps(m1))
Table("a", m2, schema="y", extend_existing=True)
eq_(m2._schemas, m1._schemas)
def test_metadata_schema_arg(self):
m1 = MetaData(schema="sch1")
m2 = MetaData(schema="sch1", quote_schema=True)
m3 = MetaData(schema="sch1", quote_schema=False)
m4 = MetaData()
for (
i,
(
name,
metadata,
schema_,
quote_schema,
exp_schema,
exp_quote_schema,
),
) in enumerate(
[
("t1", m1, None, None, "sch1", None),
("t2", m1, "sch2", None, "sch2", None),
("t3", m1, "sch2", True, "sch2", True),
("t4", m1, "sch1", None, "sch1", None),
("t5", m1, BLANK_SCHEMA, None, None, None),
("t1", m2, None, None, "sch1", True),
("t2", m2, "sch2", None, "sch2", None),
("t3", m2, "sch2", True, "sch2", True),
("t4", m2, "sch1", None, "sch1", None),
("t1", m3, None, None, "sch1", False),
("t2", m3, "sch2", None, "sch2", None),
("t3", m3, "sch2", True, "sch2", True),
("t4", m3, "sch1", None, "sch1", None),
("t1", m4, None, None, None, None),
("t2", m4, "sch2", None, "sch2", None),
("t3", m4, "sch2", True, "sch2", True),
("t4", m4, "sch1", None, "sch1", None),
("t5", m4, BLANK_SCHEMA, None, None, None),
]
):
kw = {}
if schema_ is not None:
kw["schema"] = schema_
if quote_schema is not None:
kw["quote_schema"] = quote_schema
t = Table(name, metadata, **kw)
eq_(t.schema, exp_schema, "test %d, table schema" % i)
eq_(
t.schema.quote if t.schema is not None else None,
exp_quote_schema,
"test %d, table quote_schema" % i,
)
seq = Sequence(name, metadata=metadata, **kw)
eq_(seq.schema, exp_schema, "test %d, seq schema" % i)
eq_(
seq.schema.quote if seq.schema is not None else None,
exp_quote_schema,
"test %d, seq quote_schema" % i,
)
def test_manual_dependencies(self):
    """add_is_dependent_on() fully determines sorted_tables ordering."""
    meta = MetaData()
    t_a = Table("a", meta, Column("foo", Integer))
    t_b = Table("b", meta, Column("foo", Integer))
    t_c = Table("c", meta, Column("foo", Integer))
    t_d = Table("d", meta, Column("foo", Integer))
    t_e = Table("e", meta, Column("foo", Integer))

    # wire up the dependency graph by hand, no foreign keys involved
    t_e.add_is_dependent_on(t_c)
    t_a.add_is_dependent_on(t_b)
    t_b.add_is_dependent_on(t_d)
    t_e.add_is_dependent_on(t_b)
    t_c.add_is_dependent_on(t_a)
    eq_(meta.sorted_tables, [t_d, t_b, t_a, t_c, t_e])
def test_deterministic_order(self):
    """Tables without dependencies sort by name, interleaved with
    manually declared dependencies."""
    meta = MetaData()
    t_a = Table("a", meta, Column("foo", Integer))
    t_b = Table("b", meta, Column("foo", Integer))
    t_c = Table("c", meta, Column("foo", Integer))
    t_d = Table("d", meta, Column("foo", Integer))
    t_e = Table("e", meta, Column("foo", Integer))

    # only two explicit edges; everything else falls back to name order
    t_e.add_is_dependent_on(t_c)
    t_a.add_is_dependent_on(t_b)
    eq_(meta.sorted_tables, [t_b, t_c, t_d, t_a, t_e])
def test_fks_deterministic_order(self):
    """Foreign keys impose ordering; remaining ties break by name."""
    meta = MetaData()
    # a depends on b, e depends on c; c and d are free-standing
    t_a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
    t_b = Table("b", meta, Column("foo", Integer))
    t_c = Table("c", meta, Column("foo", Integer))
    t_d = Table("d", meta, Column("foo", Integer))
    t_e = Table("e", meta, Column("foo", Integer, ForeignKey("c.foo")))
    eq_(meta.sorted_tables, [t_b, t_c, t_d, t_a, t_e])
def test_cycles_fks_warning_one(self):
    """A three-table FK cycle warns; cyclic members fall back to name
    order while acyclic FKs are still honored."""
    meta = MetaData()
    a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
    b = Table("b", meta, Column("foo", Integer, ForeignKey("d.foo")))
    c = Table("c", meta, Column("foo", Integer, ForeignKey("b.foo")))
    d = Table("d", meta, Column("foo", Integer, ForeignKey("c.foo")))
    e = Table("e", meta, Column("foo", Integer))
    with testing.expect_warnings(
        "Cannot correctly sort tables; there are unresolvable cycles "
        'between tables "b, c, d", which is usually caused by mutually '
        "dependent foreign key constraints. "
        "Foreign key constraints involving these tables will not be "
        "considered"
    ):
        # b/c/d sort by name; a still sorts after b via its intact FK
        eq_(meta.sorted_tables, [b, c, d, e, a])

def test_cycles_fks_warning_two(self):
    """Same warning for a two-table cycle; the unrelated d->e->c chain
    still sorts by dependency."""
    meta = MetaData()
    a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
    b = Table("b", meta, Column("foo", Integer, ForeignKey("a.foo")))
    c = Table("c", meta, Column("foo", Integer, ForeignKey("e.foo")))
    d = Table("d", meta, Column("foo", Integer))
    e = Table("e", meta, Column("foo", Integer, ForeignKey("d.foo")))
    with testing.expect_warnings(
        "Cannot correctly sort tables; there are unresolvable cycles "
        'between tables "a, b", which is usually caused by mutually '
        "dependent foreign key constraints. "
        "Foreign key constraints involving these tables will not be "
        "considered"
    ):
        eq_(meta.sorted_tables, [a, b, d, e, c])
def test_cycles_fks_fks_delivered_separately(self):
    """sort_tables_and_constraints() strips the FK constraints of
    cyclic tables out of their entries and delivers them in a trailing
    ``(None, constraints)`` tuple."""
    meta = MetaData()
    a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
    b = Table("b", meta, Column("foo", Integer, ForeignKey("a.foo")))
    c = Table("c", meta, Column("foo", Integer, ForeignKey("e.foo")))
    d = Table("d", meta, Column("foo", Integer))
    e = Table("e", meta, Column("foo", Integer, ForeignKey("d.foo")))
    results = schema.sort_tables_and_constraints(
        sorted(meta.tables.values(), key=lambda t: t.key)
    )
    # normalize the final entry's constraint collection to a plain set
    # so it compares predictably below
    results[-1] = (None, set(results[-1][-1]))
    eq_(
        results,
        [
            (a, set()),
            (b, set()),
            (d, {fk.constraint for fk in d.foreign_keys}),
            (e, {fk.constraint for fk in e.foreign_keys}),
            (c, {fk.constraint for fk in c.foreign_keys}),
            (
                None,
                # the a<->b cycle's constraints arrive detached
                {fk.constraint for fk in a.foreign_keys}.union(
                    fk.constraint for fk in b.foreign_keys
                ),
            ),
        ],
    )
def test_cycles_fks_usealter(self):
    """Marking one FK in a cycle with use_alter=True breaks the cycle
    for sorting purposes, so no warning is emitted."""
    meta = MetaData()
    a = Table("a", meta, Column("foo", Integer, ForeignKey("b.foo")))
    b = Table(
        "b",
        meta,
        # this edge is excluded from the topological sort
        Column("foo", Integer, ForeignKey("d.foo", use_alter=True)),
    )
    c = Table("c", meta, Column("foo", Integer, ForeignKey("b.foo")))
    d = Table("d", meta, Column("foo", Integer, ForeignKey("c.foo")))
    e = Table("e", meta, Column("foo", Integer))
    eq_(meta.sorted_tables, [b, e, a, c, d])

def test_nonexistent(self):
    """Reflecting a table that does not exist raises NoSuchTableError."""
    assert_raises(
        tsa.exc.NoSuchTableError,
        Table,
        "fake_table",
        MetaData(),
        autoload_with=testing.db,
    )
def test_assorted_repr(self):
    """Pin the exact repr() strings of assorted schema constructs."""
    t1 = Table("foo", MetaData(), Column("x", Integer))
    i1 = Index("bar", t1.c.x)
    ck = schema.CheckConstraint("x > y", name="someconstraint")
    for const, exp in (
        (Sequence("my_seq"), "Sequence('my_seq')"),
        (Sequence("my_seq", start=5), "Sequence('my_seq', start=5)"),
        (Column("foo", Integer), "Column('foo', Integer(), table=None)"),
        (
            Column(
                "foo",
                Integer,
                primary_key=True,
                nullable=False,
                onupdate=1,
                default=42,
                server_default="42",
                comment="foo",
            ),
            # default/onupdate/server_default embed their own reprs
            "Column('foo', Integer(), table=None, primary_key=True, "
            "nullable=False, onupdate=%s, default=%s, server_default=%s, "
            "comment='foo')"
            % (
                ColumnDefault(1),
                ColumnDefault(42),
                DefaultClause("42"),
            ),
        ),
        (
            Table("bar", MetaData(), Column("x", String)),
            "Table('bar', MetaData(), "
            "Column('x', String(), table=<bar>), schema=None)",
        ),
        (
            schema.DefaultGenerator(for_update=True),
            "DefaultGenerator(for_update=True)",
        ),
        (schema.Index("bar", "c"), "Index('bar', 'c')"),
        (i1, "Index('bar', Column('x', Integer(), table=<foo>))"),
        (schema.FetchedValue(), "FetchedValue()"),
        (
            ck,
            "CheckConstraint("
            "%s"
            ", name='someconstraint')" % repr(ck.sqltext),
        ),
        (ColumnDefault(("foo", "bar")), "ColumnDefault(('foo', 'bar'))"),
    ):
        eq_(repr(const), exp)
class ToMetaDataTest(fixtures.TestBase, AssertsCompiledSQL, ComparesTables):
    @testing.requires.check_constraints
    def test_copy(self):
        """Compare three ways of duplicating table metadata against the
        originals: to_metadata(), pickling, and pickling a reflected
        MetaData.  Requires a live database (testing.db)."""
        # TODO: modernize this test for 2.0
        from sqlalchemy.testing.schema import Table

        meta = MetaData()
        table = Table(
            "mytable",
            meta,
            Column("myid", Integer, Sequence("foo_id_seq"), primary_key=True),
            Column("name", String(40), nullable=True),
            Column(
                "foo",
                String(40),
                nullable=False,
                server_default="x",
                server_onupdate="q",
            ),
            Column(
                "bar", String(40), nullable=False, default="y", onupdate="z"
            ),
            Column(
                "description", String(30), CheckConstraint("description='hi'")
            ),
            UniqueConstraint("name"),
            test_needs_fk=True,
        )
        table2 = Table(
            "othertable",
            meta,
            Column("id", Integer, Sequence("foo_seq"), primary_key=True),
            Column("myid", Integer, ForeignKey("mytable.myid")),
            test_needs_fk=True,
        )
        table3 = Table(
            "has_comments",
            meta,
            Column("foo", Integer, comment="some column"),
            comment="table comment",
        )

        # each helper returns (mytable, othertable, has_comments) copies
        def test_to_metadata():
            meta2 = MetaData()
            table_c = table.to_metadata(meta2)
            table2_c = table2.to_metadata(meta2)
            table3_c = table3.to_metadata(meta2)
            return (table_c, table2_c, table3_c)

        def test_pickle():
            # bind is expected to be dropped by pickling
            meta.bind = testing.db
            meta2 = pickle.loads(pickle.dumps(meta))
            assert meta2.bind is None
            pickle.loads(pickle.dumps(meta2))
            return (
                meta2.tables["mytable"],
                meta2.tables["othertable"],
                meta2.tables["has_comments"],
            )

        def test_pickle_via_reflect():
            # this is the most common use case, pickling the results of a
            # database reflection
            meta2 = MetaData()
            t1 = Table("mytable", meta2, autoload_with=testing.db)
            Table("othertable", meta2, autoload_with=testing.db)
            Table("has_comments", meta2, autoload_with=testing.db)
            meta3 = pickle.loads(pickle.dumps(meta2))
            assert meta3.bind is None
            assert meta3.tables["mytable"] is not t1
            return (
                meta3.tables["mytable"],
                meta3.tables["othertable"],
                meta3.tables["has_comments"],
            )

        meta.create_all(testing.db)
        try:
            for test, has_constraints, reflect in (
                (test_to_metadata, True, False),
                (test_pickle, True, False),
                (test_pickle_via_reflect, False, True),
            ):
                table_c, table2_c, table3_c = test()
                self.assert_tables_equal(table, table_c)
                self.assert_tables_equal(table2, table2_c)
                # copies must be distinct objects with re-linked FKs
                assert table is not table_c
                assert table.primary_key is not table_c.primary_key
                assert (
                    list(table2_c.c.myid.foreign_keys)[0].column
                    is table_c.c.myid
                )
                assert (
                    list(table2_c.c.myid.foreign_keys)[0].column
                    is not table.c.myid
                )
                assert "x" in str(table_c.c.foo.server_default.arg)
                if not reflect:
                    # client-side defaults are not recoverable by reflection
                    assert isinstance(table_c.c.myid.default, Sequence)
                    assert str(table_c.c.foo.server_onupdate.arg) == "q"
                    assert str(table_c.c.bar.default.arg) == "y"
                    assert (
                        getattr(
                            table_c.c.bar.onupdate.arg,
                            "arg",
                            table_c.c.bar.onupdate.arg,
                        )
                        == "z"
                    )
                    assert isinstance(table2_c.c.id.default, Sequence)

                # constraints don't get reflected for any dialect right
                # now
                if has_constraints:
                    for c in table_c.c.description.constraints:
                        if isinstance(c, CheckConstraint):
                            break
                    else:
                        assert False
                    assert str(c.sqltext) == "description='hi'"
                    for c in table_c.constraints:
                        if isinstance(c, UniqueConstraint):
                            break
                    else:
                        assert False
                    # constraint columns point at the copy, not the original
                    assert c.columns.contains_column(table_c.c.name)
                    assert not c.columns.contains_column(table.c.name)

                if testing.requires.comment_reflection.enabled:
                    eq_(table3_c.comment, "table comment")
                    eq_(table3_c.c.foo.comment, "some column")
        finally:
            meta.drop_all(testing.db)
def test_col_key_fk_parent(self):
    """to_metadata() re-links an FK declared via a column ``key`` even
    when the referring table is copied before its target (#2643)."""
    source = MetaData()
    parent = Table("a", source, Column("x", Integer))
    child = Table(
        "b", source, Column("x", Integer, ForeignKey("a.x"), key="y")
    )
    assert child.c.y.references(parent.c.x)

    # copy the referring table first; the FK must resolve once the
    # parent arrives in the destination MetaData
    destination = MetaData()
    child_copy = child.to_metadata(destination)
    parent_copy = parent.to_metadata(destination)
    assert child_copy.c.y.references(parent_copy.c.x)
def test_column_collection_constraint_w_ad_hoc_columns(self):
    """Test ColumnCollectionConstraint that has columns that aren't
    part of the Table.
    """
    meta = MetaData()

    # constraints built on literal_column() expressions, not Table columns
    uq1 = UniqueConstraint(literal_column("some_name"))
    cc1 = CheckConstraint(literal_column("some_name") > 5)
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        uq1,
        cc1,
    )
    self.assert_compile(
        schema.AddConstraint(uq1),
        "ALTER TABLE mytable ADD UNIQUE (some_name)",
        dialect="default",
    )
    self.assert_compile(
        schema.AddConstraint(cc1),
        "ALTER TABLE mytable ADD CHECK (some_name > 5)",
        dialect="default",
    )

    # the constraints must survive to_metadata() and compile identically
    meta2 = MetaData()
    table2 = table.to_metadata(meta2)
    uq2 = [
        c for c in table2.constraints if isinstance(c, UniqueConstraint)
    ][0]
    cc2 = [
        c for c in table2.constraints if isinstance(c, CheckConstraint)
    ][0]
    self.assert_compile(
        schema.AddConstraint(uq2),
        "ALTER TABLE mytable ADD UNIQUE (some_name)",
        dialect="default",
    )
    self.assert_compile(
        schema.AddConstraint(cc2),
        "ALTER TABLE mytable ADD CHECK (some_name > 5)",
        dialect="default",
    )
def test_change_schema(self):
    """to_metadata(schema=...) rewrites both tables' schema and keeps
    the FK join working across the new schema."""
    meta = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        Column(
            "description", String(30), CheckConstraint("description='hi'")
        ),
        UniqueConstraint("name"),
    )
    table2 = Table(
        "othertable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("myid", Integer, ForeignKey("mytable.myid")),
    )
    meta2 = MetaData()
    table_c = table.to_metadata(meta2, schema="someschema")
    table2_c = table2.to_metadata(meta2, schema="someschema")
    eq_(
        str(table_c.join(table2_c).onclause),
        str(table_c.c.myid == table2_c.c.myid),
    )
    eq_(
        str(table_c.join(table2_c).onclause),
        "someschema.mytable.myid = someschema.othertable.myid",
    )

def test_retain_table_schema(self):
    """With no schema argument, to_metadata() keeps the tables' own
    explicit schema."""
    meta = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        Column(
            "description", String(30), CheckConstraint("description='hi'")
        ),
        UniqueConstraint("name"),
        schema="myschema",
    )
    table2 = Table(
        "othertable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("myid", Integer, ForeignKey("myschema.mytable.myid")),
        schema="myschema",
    )
    meta2 = MetaData()
    table_c = table.to_metadata(meta2)
    table2_c = table2.to_metadata(meta2)
    eq_(
        str(table_c.join(table2_c).onclause),
        str(table_c.c.myid == table2_c.c.myid),
    )
    eq_(
        str(table_c.join(table2_c).onclause),
        "myschema.mytable.myid = myschema.othertable.myid",
    )
def test_change_name_retain_metadata(self):
    """to_metadata(name=...) into the *same* MetaData creates a renamed
    copy; schema may be changed simultaneously."""
    meta = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        Column(
            "description", String(30), CheckConstraint("description='hi'")
        ),
        UniqueConstraint("name"),
        schema="myschema",
    )
    table2 = table.to_metadata(table.metadata, name="newtable")
    table3 = table.to_metadata(
        table.metadata, schema="newschema", name="newtable"
    )
    assert table.metadata is table2.metadata
    assert table.metadata is table3.metadata
    eq_(
        (table.name, table2.name, table3.name),
        ("mytable", "newtable", "newtable"),
    )
    # keys reflect schema + new name
    eq_(
        (table.key, table2.key, table3.key),
        ("myschema.mytable", "myschema.newtable", "newschema.newtable"),
    )

def test_change_name_change_metadata(self):
    """to_metadata(name=...) into a different MetaData renames the copy
    and retains the original's schema."""
    meta = MetaData()
    meta2 = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        Column(
            "description", String(30), CheckConstraint("description='hi'")
        ),
        UniqueConstraint("name"),
        schema="myschema",
    )
    table2 = table.to_metadata(meta2, name="newtable")
    assert table.metadata is not table2.metadata
    eq_((table.name, table2.name), ("mytable", "newtable"))
    eq_((table.key, table2.key), ("myschema.mytable", "myschema.newtable"))
def test_change_name_selfref_fk_moves(self):
    """On a renamed copy, a self-referential FK follows the copy while
    an FK to another table keeps pointing at the original target."""
    meta = MetaData()
    referenced = Table(
        "ref", meta, Column("id", Integer, primary_key=True)
    )
    table = Table(
        "mytable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("parent_id", ForeignKey("mytable.id")),
        Column("ref_id", ForeignKey("ref.id")),
    )
    table2 = table.to_metadata(table.metadata, name="newtable")
    assert table.metadata is table2.metadata
    assert table2.c.ref_id.references(referenced.c.id)
    # the self-reference re-targets the copy itself
    assert table2.c.parent_id.references(table2.c.id)

def test_change_name_selfref_fk_moves_w_schema(self):
    """Same as above, with a simultaneous schema change; the external
    FK target must also be copied into the new schema."""
    meta = MetaData()
    referenced = Table(
        "ref", meta, Column("id", Integer, primary_key=True)
    )
    table = Table(
        "mytable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("parent_id", ForeignKey("mytable.id")),
        Column("ref_id", ForeignKey("ref.id")),
    )
    table2 = table.to_metadata(
        table.metadata, name="newtable", schema="newschema"
    )
    ref2 = referenced.to_metadata(table.metadata, schema="newschema")
    assert table.metadata is table2.metadata
    assert table2.c.ref_id.references(ref2.c.id)
    assert table2.c.parent_id.references(table2.c.id)
def _assert_fk(self, t2, schema, expected, referred_schema_fn=None):
    """Copy ``t2`` into a fresh MetaData (optionally overriding its
    schema) and assert the copied FK's colspec equals ``expected``.

    :param t2: table whose column ``y`` carries the FK under test
    :param schema: schema to pass to to_metadata(), or None to retain
    :param expected: expected ``_get_colspec()`` of the copied FK
    :param referred_schema_fn: optional hook forwarded to to_metadata()
    """
    m2 = MetaData()
    existing_schema = t2.schema
    if schema:
        t2c = t2.to_metadata(
            m2, schema=schema, referred_schema_fn=referred_schema_fn
        )
        eq_(t2c.schema, schema)
    else:
        t2c = t2.to_metadata(m2, referred_schema_fn=referred_schema_fn)
        eq_(t2c.schema, existing_schema)
    eq_(list(t2c.c.y.foreign_keys)[0]._get_colspec(), expected)
def test_fk_has_schema_string_retain_schema(self):
    """Matrix of FK-schema behaviors under to_metadata(), driven by
    _assert_fk().  Naming scheme: string vs. col = how the FK was
    declared; retain vs. new = whether a new schema is passed; each
    string-based case is checked both before and after the target
    table exists."""
    m = MetaData()
    t2 = Table("t2", m, Column("y", Integer, ForeignKey("q.t1.x")))
    self._assert_fk(t2, None, "q.t1.x")

    Table("t1", m, Column("x", Integer), schema="q")
    self._assert_fk(t2, None, "q.t1.x")

def test_fk_has_schema_string_new_schema(self):
    """An FK string naming a different schema is not rewritten."""
    m = MetaData()
    t2 = Table("t2", m, Column("y", Integer, ForeignKey("q.t1.x")))
    self._assert_fk(t2, "z", "q.t1.x")

    Table("t1", m, Column("x", Integer), schema="q")
    self._assert_fk(t2, "z", "q.t1.x")

def test_fk_has_schema_col_retain_schema(self):
    """Column-object FK keeps the target's schema across a copy."""
    m = MetaData()
    t1 = Table("t1", m, Column("x", Integer), schema="q")
    t2 = Table("t2", m, Column("y", Integer, ForeignKey(t1.c.x)))

    self._assert_fk(t2, "z", "q.t1.x")

def test_fk_has_schema_col_new_schema(self):
    m = MetaData()
    t1 = Table("t1", m, Column("x", Integer), schema="q")
    t2 = Table("t2", m, Column("y", Integer, ForeignKey(t1.c.x)))

    self._assert_fk(t2, "z", "q.t1.x")

def test_fk_and_referent_has_same_schema_string_retain_schema(self):
    m = MetaData()
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey("q.t1.x")), schema="q"
    )

    self._assert_fk(t2, None, "q.t1.x")

    Table("t1", m, Column("x", Integer), schema="q")
    self._assert_fk(t2, None, "q.t1.x")

def test_fk_and_referent_has_same_schema_string_new_schema(self):
    """When referrer and referent share a schema, a new schema is
    applied to both sides of the FK."""
    m = MetaData()
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey("q.t1.x")), schema="q"
    )

    self._assert_fk(t2, "z", "z.t1.x")

    Table("t1", m, Column("x", Integer), schema="q")
    self._assert_fk(t2, "z", "z.t1.x")

def test_fk_and_referent_has_same_schema_col_retain_schema(self):
    m = MetaData()
    t1 = Table("t1", m, Column("x", Integer), schema="q")
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
    )
    self._assert_fk(t2, None, "q.t1.x")

def test_fk_and_referent_has_same_schema_col_new_schema(self):
    m = MetaData()
    t1 = Table("t1", m, Column("x", Integer), schema="q")
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
    )
    self._assert_fk(t2, "z", "z.t1.x")

def test_fk_and_referent_has_diff_schema_string_retain_schema(self):
    """A cross-schema FK is never rewritten by a schema override."""
    m = MetaData()
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey("p.t1.x")), schema="q"
    )

    self._assert_fk(t2, None, "p.t1.x")

    Table("t1", m, Column("x", Integer), schema="p")
    self._assert_fk(t2, None, "p.t1.x")

def test_fk_and_referent_has_diff_schema_string_new_schema(self):
    m = MetaData()
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey("p.t1.x")), schema="q"
    )

    self._assert_fk(t2, "z", "p.t1.x")

    Table("t1", m, Column("x", Integer), schema="p")
    self._assert_fk(t2, "z", "p.t1.x")

def test_fk_and_referent_has_diff_schema_col_retain_schema(self):
    m = MetaData()
    t1 = Table("t1", m, Column("x", Integer), schema="p")
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
    )
    self._assert_fk(t2, None, "p.t1.x")

def test_fk_and_referent_has_diff_schema_col_new_schema(self):
    m = MetaData()
    t1 = Table("t1", m, Column("x", Integer), schema="p")
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
    )
    self._assert_fk(t2, "z", "p.t1.x")

def test_fk_custom_system(self):
    """A referred_schema_fn hook can override the FK's target schema."""
    m = MetaData()
    t2 = Table(
        "t2", m, Column("y", Integer, ForeignKey("p.t1.x")), schema="q"
    )

    def ref_fn(table, to_schema, constraint, referred_schema):
        # receives the original table plus old/new schema context
        assert table is t2
        eq_(to_schema, "z")
        eq_(referred_schema, "p")
        return "h"

    self._assert_fk(t2, "z", "h.t1.x", referred_schema_fn=ref_fn)
def test_copy_info(self):
    """to_metadata() deep-copies every ``.info`` dict except the
    MetaData's own; mutating the originals afterwards must not leak
    into the copies."""
    m = MetaData()
    fk = ForeignKey("t2.id")
    c = Column("c", Integer, fk)
    ck = CheckConstraint("c > 5")
    t = Table("t", m, c, ck)

    # populate info on every kind of schema object
    m.info["minfo"] = True
    fk.info["fkinfo"] = True
    c.info["cinfo"] = True
    ck.info["ckinfo"] = True
    t.info["tinfo"] = True
    t.primary_key.info["pkinfo"] = True
    fkc = [
        const
        for const in t.constraints
        if isinstance(const, ForeignKeyConstraint)
    ][0]
    fkc.info["fkcinfo"] = True

    m2 = MetaData()
    t2 = t.to_metadata(m2)

    # flip the originals; copies must retain the True values
    m.info["minfo"] = False
    fk.info["fkinfo"] = False
    c.info["cinfo"] = False
    ck.info["ckinfo"] = False
    t.primary_key.info["pkinfo"] = False
    fkc.info["fkcinfo"] = False

    # MetaData info is not carried over
    eq_(m2.info, {})
    eq_(t2.info, {"tinfo": True})
    eq_(t2.c.c.info, {"cinfo": True})
    eq_(list(t2.c.c.foreign_keys)[0].info, {"fkinfo": True})
    eq_(t2.primary_key.info, {"pkinfo": True})

    fkc2 = [
        const
        for const in t2.constraints
        if isinstance(const, ForeignKeyConstraint)
    ][0]
    eq_(fkc2.info, {"fkcinfo": True})

    ck2 = [
        const
        for const in t2.constraints
        if isinstance(const, CheckConstraint)
    ][0]
    eq_(ck2.info, {"ckinfo": True})
def test_dialect_kwargs(self):
    """Dialect-specific keyword arguments survive to_metadata()."""
    source_meta = MetaData()
    original = Table(
        "mytable",
        source_meta,
        Column("myid", Integer, primary_key=True),
        mysql_engine="InnoDB",
    )
    copied = original.to_metadata(MetaData())
    eq_(original.kwargs, {"mysql_engine": "InnoDB"})
    eq_(original.kwargs, copied.kwargs)
def test_indexes(self):
    """All index flavors (inline, multi-column, functional, textual)
    are carried across to_metadata()."""
    meta = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("data1", Integer, index=True),
        Column("data2", Integer),
        Index("text", text("data1 + 1")),
    )
    Index("multi", table.c.data1, table.c.data2)
    Index("func", func.abs(table.c.data1))
    Index("multi-func", table.c.data1, func.abs(table.c.data2))

    meta2 = MetaData()
    table_c = table.to_metadata(meta2)

    def _get_key(i):
        # comparable identity for an Index: name, uniqueness, kwargs,
        # stringified expressions
        return (
            [i.name, i.unique]
            + sorted(i.kwargs.items())
            + [str(col) for col in i.expressions]
        )

    eq_(
        sorted([_get_key(i) for i in table.indexes]),
        sorted([_get_key(i) for i in table_c.indexes]),
    )

def test_indexes_with_col_redefine(self):
    """Indexes survive to_metadata() even after their columns were
    replaced via extend_existing=True."""
    meta = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("data1", Integer),
        Column("data2", Integer),
        Index("text", text("data1 + 1")),
    )
    Index("multi", table.c.data1, table.c.data2)
    Index("func", func.abs(table.c.data1))
    Index("multi-func", table.c.data1, func.abs(table.c.data2))

    # redefine the indexed columns in place
    table = Table(
        "mytable",
        meta,
        Column("data1", Integer),
        Column("data2", Integer),
        extend_existing=True,
    )

    meta2 = MetaData()
    table_c = table.to_metadata(meta2)

    def _get_key(i):
        return (
            [i.name, i.unique]
            + sorted(i.kwargs.items())
            + [str(col) for col in i.expressions]
        )

    eq_(
        sorted([_get_key(i) for i in table.indexes]),
        sorted([_get_key(i) for i in table_c.indexes]),
    )
@emits_warning("Table '.+' already exists within the given MetaData")
def test_already_exists(self):
meta1 = MetaData()
table1 = Table(
"mytable", meta1, Column("myid", Integer, primary_key=True)
)
meta2 = MetaData()
table2 = Table(
"mytable", meta2, Column("yourid", Integer, primary_key=True)
)
table_c = table1.to_metadata(meta2)
table_d = table2.to_metadata(meta2)
# d'oh!
assert table_c is table_d
def test_default_schema_metadata(self):
    """schema=None on to_metadata() means 'use the target MetaData's
    default schema', replacing the source default."""
    meta = MetaData(schema="myschema")
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        Column(
            "description", String(30), CheckConstraint("description='hi'")
        ),
        UniqueConstraint("name"),
    )
    table2 = Table(
        "othertable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("myid", Integer, ForeignKey("myschema.mytable.myid")),
    )
    meta2 = MetaData(schema="someschema")
    table_c = table.to_metadata(meta2, schema=None)
    table2_c = table2.to_metadata(meta2, schema=None)
    eq_(
        str(table_c.join(table2_c).onclause),
        str(table_c.c.myid == table2_c.c.myid),
    )
    eq_(
        str(table_c.join(table2_c).onclause),
        "someschema.mytable.myid = someschema.othertable.myid",
    )

def test_strip_schema(self):
    """schema=None into a MetaData with no default schema yields
    schema-less copies."""
    meta = MetaData()
    table = Table(
        "mytable",
        meta,
        Column("myid", Integer, primary_key=True),
        Column("name", String(40), nullable=True),
        Column(
            "description", String(30), CheckConstraint("description='hi'")
        ),
        UniqueConstraint("name"),
    )
    table2 = Table(
        "othertable",
        meta,
        Column("id", Integer, primary_key=True),
        Column("myid", Integer, ForeignKey("mytable.myid")),
    )
    meta2 = MetaData()
    table_c = table.to_metadata(meta2, schema=None)
    table2_c = table2.to_metadata(meta2, schema=None)
    eq_(
        str(table_c.join(table2_c).onclause),
        str(table_c.c.myid == table2_c.c.myid),
    )
    eq_(
        str(table_c.join(table2_c).onclause),
        "mytable.myid = othertable.myid",
    )
def test_unique_true_flag(self):
    """Column(unique=True) yields exactly one UniqueConstraint on a
    to_metadata() copy."""
    meta = MetaData()
    table = Table("mytable", meta, Column("x", Integer, unique=True))

    copied = table.to_metadata(MetaData())
    unique_constraints = [
        const
        for const in copied.constraints
        if isinstance(const, UniqueConstraint)
    ]
    eq_(len(unique_constraints), 1)

def test_index_true_flag(self):
    """Column(index=True) carries its Index through to_metadata()."""
    meta = MetaData()
    table = Table("mytable", meta, Column("x", Integer, index=True))

    copied = table.to_metadata(MetaData())
    eq_(len(copied.indexes), 1)
class InfoTest(fixtures.TestBase):
    """Every schema construct exposes an ``.info`` dict, defaulting to
    empty and settable via the ``info`` keyword at construction."""

    def test_metadata_info(self):
        metadata = MetaData()
        eq_(metadata.info, {})

        metadata = MetaData(info={"foo": "bar"})
        eq_(metadata.info, {"foo": "bar"})

    def test_foreignkey_constraint_info(self):
        constraint = ForeignKeyConstraint(["a"], ["b"], name="bar")
        eq_(constraint.info, {})

        constraint = ForeignKeyConstraint(
            ["a"], ["b"], name="bar", info={"foo": "bar"}
        )
        eq_(constraint.info, {"foo": "bar"})

    def test_foreignkey_info(self):
        foreign_key = ForeignKey("a")
        eq_(foreign_key.info, {})

        foreign_key = ForeignKey("a", info={"foo": "bar"})
        eq_(foreign_key.info, {"foo": "bar"})

    def test_primarykey_constraint_info(self):
        constraint = PrimaryKeyConstraint("a", name="x")
        eq_(constraint.info, {})

        constraint = PrimaryKeyConstraint("a", name="x", info={"foo": "bar"})
        eq_(constraint.info, {"foo": "bar"})

    def test_unique_constraint_info(self):
        constraint = UniqueConstraint("a", name="x")
        eq_(constraint.info, {})

        constraint = UniqueConstraint("a", name="x", info={"foo": "bar"})
        eq_(constraint.info, {"foo": "bar"})

    def test_check_constraint_info(self):
        constraint = CheckConstraint("foo=bar", name="x")
        eq_(constraint.info, {})

        constraint = CheckConstraint("foo=bar", name="x", info={"foo": "bar"})
        eq_(constraint.info, {"foo": "bar"})

    def test_index_info(self):
        index = Index("x", "a")
        eq_(index.info, {})

        index = Index("x", "a", info={"foo": "bar"})
        eq_(index.info, {"foo": "bar"})

    def test_column_info(self):
        column = Column("x", Integer)
        eq_(column.info, {})

        column = Column("x", Integer, info={"foo": "bar"})
        eq_(column.info, {"foo": "bar"})

    def test_table_info(self):
        table = Table("x", MetaData())
        eq_(table.info, {})

        table = Table("x", MetaData(), info={"foo": "bar"})
        eq_(table.info, {"foo": "bar"})
class TableTest(fixtures.TestBase, AssertsCompiledSQL):
    @testing.requires.temporary_tables
    @testing.skip_if("mssql", "different col format")
    def test_prefixes(self):
        """The ``prefixes`` argument injects keywords between CREATE
        and TABLE in the emitted DDL."""
        from sqlalchemy import Table

        table1 = Table(
            "temporary_table_1",
            MetaData(),
            Column("col1", Integer),
            prefixes=["TEMPORARY"],
        )

        self.assert_compile(
            schema.CreateTable(table1),
            "CREATE TEMPORARY TABLE temporary_table_1 (col1 INTEGER)",
        )

        table2 = Table(
            "temporary_table_2",
            MetaData(),
            Column("col1", Integer),
            prefixes=["VIRTUAL"],
        )
        self.assert_compile(
            schema.CreateTable(table2),
            "CREATE VIRTUAL TABLE temporary_table_2 (col1 INTEGER)",
        )

    @testing.combinations((None, []), ((), []), ([], []), (["foo"], ["foo"]))
    def test_prefixes_parameter_parsing(self, arg, expected):
        """test #6685"""
        # None and empty sequences all normalize to an empty list
        table = Table("foo", MetaData(), Column("bar", Integer), prefixes=arg)
        eq_(table._prefixes, expected)
def test_table_info(self):
    """Table accepts an ``info`` dict at construction and always
    exposes a mutable one."""
    metadata = MetaData()
    with_info = Table("foo", metadata, info={"x": "y"})
    empty_info = Table("bar", metadata, info={})
    no_info = Table("bat", metadata)
    assert with_info.info == {"x": "y"}
    assert empty_info.info == {}
    assert no_info.info == {}

    # the info dict is writable regardless of how it was initialized
    for table in (with_info, empty_info, no_info):
        table.info["bar"] = "zip"
        assert table.info["bar"] == "zip"
def test_invalid_objects(self):
    """Passing non-SchemaItem positional arguments to Table() raises
    ArgumentError with a message naming the offending object."""
    # a ColumnClause is not a schema-level Column
    assert_raises_message(
        tsa.exc.ArgumentError,
        "'SchemaItem' object, such as a 'Column' or a "
        "'Constraint' expected, got <.*ColumnClause at .*; q>",
        Table,
        "asdf",
        MetaData(),
        tsa.column("q", Integer),
    )

    # a bare type instance is rejected
    assert_raises_message(
        tsa.exc.ArgumentError,
        r"'SchemaItem' object, such as a 'Column' or a "
        r"'Constraint' expected, got String\(\)",
        Table,
        "asdf",
        MetaData(),
        String(),
    )

    # arbitrary non-schema values are rejected
    assert_raises_message(
        tsa.exc.ArgumentError,
        "'SchemaItem' object, such as a 'Column' or a "
        "'Constraint' expected, got 12",
        Table,
        "asdf",
        MetaData(),
        12,
    )
def test_reset_exported_passes(self):
    """_reset_exported() is a no-op for the column collection."""
    metadata = MetaData()
    table = Table("t", metadata, Column("foo", Integer))
    eq_(list(table.c), [table.c.foo])

    table._reset_exported()

    eq_(list(table.c), [table.c.foo])
def test_foreign_key_constraints_collection(self):
    """Table.foreign_key_constraints accumulates constraints generated
    by FK columns as well as explicitly appended
    ForeignKeyConstraint objects."""
    metadata = MetaData()
    t1 = Table("foo", metadata, Column("a", Integer))
    eq_(t1.foreign_key_constraints, set())

    fk1 = ForeignKey("q.id")
    fk2 = ForeignKey("j.id")
    fk3 = ForeignKeyConstraint(["b", "c"], ["r.x", "r.y"])

    # each appended FK column contributes its auto-generated constraint
    # (set literals used for consistency with the set comprehensions
    # elsewhere in this module; set([...]) is the legacy spelling)
    t1.append_column(Column("b", Integer, fk1))
    eq_(t1.foreign_key_constraints, {fk1.constraint})

    t1.append_column(Column("c", Integer, fk2))
    eq_(t1.foreign_key_constraints, {fk1.constraint, fk2.constraint})

    # an explicitly appended composite constraint shows up as itself
    t1.append_constraint(fk3)
    eq_(
        t1.foreign_key_constraints,
        {fk1.constraint, fk2.constraint, fk3},
    )
def test_c_immutable(self):
    """The Table.c collection rejects direct mutation; columns must be
    added via append_column()."""
    m = MetaData()
    t1 = Table("t", m, Column("x", Integer), Column("y", Integer))
    assert_raises(TypeError, t1.c.extend, [Column("z", Integer)])

    def assign():
        # item assignment is blocked
        t1.c["z"] = Column("z", Integer)

    assert_raises(TypeError, assign)

    def assign2():
        # attribute assignment is blocked too
        t1.c.z = Column("z", Integer)

    assert_raises(TypeError, assign2)
def test_c_mutate_after_unpickle(self):
    """An unpickled Table gets fresh Column objects but its collection
    remains mutable via append_column()."""
    m = MetaData()

    y = Column("y", Integer)
    t1 = Table("t", m, Column("x", Integer), y)

    # note we are testing immutable column collection here
    t2 = pickle.loads(pickle.dumps(t1))
    z = Column("z", Integer)
    g = Column("g", Integer)
    t2.append_column(z)

    # the original column object belongs only to the original table;
    # the unpickled table holds equivalent copies
    is_(t1.c.contains_column(y), True)
    is_(t2.c.contains_column(y), False)

    y2 = t2.c.y
    is_(t2.c.contains_column(y2), True)

    is_(t2.c.contains_column(z), True)
    is_(t2.c.contains_column(g), False)
def test_table_ctor_duplicated_column_name(self):
    """Declaring two columns with the same name in one Table() call is
    currently a deprecation warning; the later column wins."""

    def go():
        return Table(
            "t",
            MetaData(),
            Column("a", Integer),
            Column("col", Integer),
            Column("col", String),
        )

    with testing.expect_deprecated(
        "A column with name 'col' is already present in table 't'",
    ):
        t = go()
    is_true(isinstance(t.c.col.type, String))

    # when it will raise
    # with testing.expect_raises_message(
    #     exc.ArgumentError,
    #     "A column with name 'col' is already present in table 't'",
    # ):
    #     go()

def test_append_column_existing_name(self):
    """append_column() with a duplicate name warns and replaces the
    existing column (pending future hard error)."""
    t = Table("t", MetaData(), Column("col", Integer))

    with testing.expect_deprecated(
        "A column with name 'col' is already present in table 't'",
    ):
        t.append_column(Column("col", String))

    is_true(isinstance(t.c.col.type, String))

    # when it will raise
    # col = t.c.col
    # with testing.expect_raises_message(
    #     exc.ArgumentError,
    #     "A column with name 'col' is already present in table 't'",
    # ):
    #     t.append_column(Column("col", String))
    # is_true(t.c.col is col)

def test_append_column_replace_existing(self):
    """replace_existing=True swaps in the new column without warning."""
    t = Table("t", MetaData(), Column("col", Integer))
    t.append_column(Column("col", String), replace_existing=True)
    is_true(isinstance(t.c.col.type, String))
def test_autoincrement_replace(self):
    """The autoincrement column survives an extend_existing redefine."""
    m = MetaData()

    t = Table("t", m, Column("id", Integer, primary_key=True))

    is_(t._autoincrement_column, t.c.id)

    t = Table(
        "t",
        m,
        Column("id", Integer, primary_key=True),
        extend_existing=True,
    )
    is_(t._autoincrement_column, t.c.id)

def test_pk_args_standalone(self):
    """A column-less PrimaryKeyConstraint can carry dialect kwargs for
    the PK built from primary_key=True columns."""
    m = MetaData()
    t = Table(
        "t",
        m,
        Column("x", Integer, primary_key=True),
        PrimaryKeyConstraint(mssql_clustered=True),
    )
    eq_(list(t.primary_key), [t.c.x])
    eq_(t.primary_key.dialect_kwargs, {"mssql_clustered": True})

def test_pk_cols_sets_flags(self):
    """Naming columns in PrimaryKeyConstraint sets their primary_key
    flag; unnamed columns stay False."""
    m = MetaData()
    t = Table(
        "t",
        m,
        Column("x", Integer),
        Column("y", Integer),
        Column("z", Integer),
        PrimaryKeyConstraint("x", "y"),
    )
    eq_(t.c.x.primary_key, True)
    eq_(t.c.y.primary_key, True)
    eq_(t.c.z.primary_key, False)
def test_pk_col_mismatch_one(self):
    """primary_key=True flags that disagree with an explicit
    PrimaryKeyConstraint emit a warning."""
    m = MetaData()
    assert_raises_message(
        exc.SAWarning,
        "Table 't' specifies columns 'x' as primary_key=True, "
        "not matching locally specified columns 'q'",
        Table,
        "t",
        m,
        Column("x", Integer, primary_key=True),
        Column("q", Integer),
        PrimaryKeyConstraint("q"),
    )

def test_pk_col_mismatch_two(self):
    """Same warning when several flagged columns don't match."""
    m = MetaData()
    assert_raises_message(
        exc.SAWarning,
        "Table 't' specifies columns 'a', 'b', 'c' as primary_key=True, "
        "not matching locally specified columns 'b', 'c'",
        Table,
        "t",
        m,
        Column("a", Integer, primary_key=True),
        Column("b", Integer, primary_key=True),
        Column("c", Integer, primary_key=True),
        PrimaryKeyConstraint("b", "c"),
    )

@testing.emits_warning("Table 't'")
def test_pk_col_mismatch_three(self):
    """Despite the warning, the explicit constraint wins."""
    m = MetaData()
    t = Table(
        "t",
        m,
        Column("x", Integer, primary_key=True),
        Column("q", Integer),
        PrimaryKeyConstraint("q"),
    )
    eq_(list(t.primary_key), [t.c.q])

@testing.emits_warning("Table 't'")
def test_pk_col_mismatch_four(self):
    """The explicit constraint's column subset wins over flags."""
    m = MetaData()
    t = Table(
        "t",
        m,
        Column("a", Integer, primary_key=True),
        Column("b", Integer, primary_key=True),
        Column("c", Integer, primary_key=True),
        PrimaryKeyConstraint("b", "c"),
    )
    eq_(list(t.primary_key), [t.c.b, t.c.c])
def test_pk_always_flips_nullable(self):
    """Membership in a primary key forces nullable=False by default,
    whether declared via constraint or column flag."""
    m = MetaData()

    t1 = Table("t1", m, Column("x", Integer), PrimaryKeyConstraint("x"))

    t2 = Table("t2", m, Column("x", Integer, primary_key=True))

    eq_(list(t1.primary_key), [t1.c.x])

    eq_(list(t2.primary_key), [t2.c.x])

    assert t1.c.x.primary_key
    assert t2.c.x.primary_key

    assert not t2.c.x.nullable
    assert not t1.c.x.nullable

def test_pk_can_be_nullable(self):
    """An explicit nullable=True on a PK column is honored."""
    m = MetaData()

    t1 = Table(
        "t1",
        m,
        Column("x", Integer, nullable=True),
        PrimaryKeyConstraint("x"),
    )

    t2 = Table(
        "t2", m, Column("x", Integer, primary_key=True, nullable=True)
    )

    eq_(list(t1.primary_key), [t1.c.x])

    eq_(list(t2.primary_key), [t2.c.x])

    assert t1.c.x.primary_key
    assert t2.c.x.primary_key

    assert t2.c.x.nullable
    assert t1.c.x.nullable

def test_must_exist(self):
    """must_exist=True raises if the table is not already in the
    MetaData."""
    with testing.expect_raises_message(
        exc.InvalidRequestError, "Table 'foo' not defined"
    ):
        Table("foo", MetaData(), must_exist=True)
class PKAutoIncrementTest(fixtures.TestBase):
    def test_multi_integer_no_autoinc(self):
        """No implicit autoincrement column for a composite integer PK."""
        pk = PrimaryKeyConstraint(Column("a", Integer), Column("b", Integer))
        t = Table("t", MetaData())
        t.append_constraint(pk)

        is_(pk._autoincrement_column, None)

    def test_multi_integer_multi_autoinc(self):
        """Two columns explicitly flagged autoincrement=True is an error."""
        pk = PrimaryKeyConstraint(
            Column("a", Integer, autoincrement=True),
            Column("b", Integer, autoincrement=True),
        )
        t = Table("t", MetaData())
        t.append_constraint(pk)

        assert_raises_message(
            exc.ArgumentError,
            "Only one Column may be marked",
            lambda: pk._autoincrement_column,
        )

    def test_single_integer_no_autoinc(self):
        """A lone integer PK column implicitly autoincrements."""
        pk = PrimaryKeyConstraint(Column("a", Integer))
        t = Table("t", MetaData())
        t.append_constraint(pk)

        is_(pk._autoincrement_column, pk.columns["a"])

    def test_single_string_no_autoinc(self):
        """A string PK column never implicitly autoincrements."""
        pk = PrimaryKeyConstraint(Column("a", String))
        t = Table("t", MetaData())
        t.append_constraint(pk)

        is_(pk._autoincrement_column, None)

    def test_single_string_illegal_autoinc(self):
        """autoincrement=True on a non-integer column raises."""
        t = Table("t", MetaData(), Column("a", String, autoincrement=True))
        pk = PrimaryKeyConstraint(t.c.a)
        t.append_constraint(pk)

        assert_raises_message(
            exc.ArgumentError,
            "Column type VARCHAR on column 't.a'",
            lambda: pk._autoincrement_column,
        )
def test_single_integer_default(self):
t = Table(
"t",
MetaData(),
Column("a", Integer, autoincrement=True, default=lambda: 1),
)
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
is_(pk._autoincrement_column, t.c.a)
def test_single_integer_server_default(self):
# new as of 1.1; now that we have three states for autoincrement,
# if the user puts autoincrement=True with a server_default, trust
# them on it
t = Table(
"t",
MetaData(),
Column(
"a", Integer, autoincrement=True, server_default=func.magic()
),
)
pk = PrimaryKeyConstraint(t.c.a)
t.append_constraint(pk)
is_(pk._autoincrement_column, t.c.a)
def test_implicit_autoinc_but_fks(self):
m = MetaData()
Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table("t2", MetaData(), Column("a", Integer, ForeignKey("t1.id")))
pk = PrimaryKeyConstraint(t2.c.a)
t2.append_constraint(pk)
is_(pk._autoincrement_column, None)
def test_explicit_autoinc_but_fks(self):
m = MetaData()
Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
MetaData(),
Column("a", Integer, ForeignKey("t1.id"), autoincrement=True),
)
pk = PrimaryKeyConstraint(t2.c.a)
t2.append_constraint(pk)
is_(pk._autoincrement_column, t2.c.a)
t3 = Table(
"t3",
MetaData(),
Column(
"a", Integer, ForeignKey("t1.id"), autoincrement="ignore_fk"
),
)
pk = PrimaryKeyConstraint(t3.c.a)
t3.append_constraint(pk)
is_(pk._autoincrement_column, t3.c.a)
def test_no_kw_args(self):
with expect_raises_message(
TypeError,
r"Table\(\) takes at least two positional-only arguments",
check_context=False,
):
Table(name="foo", metadata=MetaData())
with expect_raises_message(
TypeError,
r"Table\(\) takes at least two positional-only arguments",
check_context=False,
):
Table("foo", metadata=MetaData())
class SchemaTypeTest(fixtures.TestBase):
    """Tests for sqltypes.SchemaType behavior: schema inheritance
    (``schema``, ``inherit_schema``, MetaData-derived schema),
    before_parent_attach / create-event propagation including through
    TypeDecorator and ARRAY wrappers, ``to_metadata`` copying, and
    CHECK-constraint generation for Enum / Boolean."""
    __backend__ = True
    class TrackEvents(object):
        # mixin that records the column/table passed to _set_table and
        # collects the targets of table/metadata create events, so tests
        # can assert which objects actually received DDL events
        column = None
        table = None
        evt_targets = ()
        def _set_table(self, column, table):
            super(SchemaTypeTest.TrackEvents, self)._set_table(column, table)
            self.column = column
            self.table = table
        def _on_table_create(self, target, bind, **kw):
            super(SchemaTypeTest.TrackEvents, self)._on_table_create(
                target, bind, **kw
            )
            self.evt_targets += (target,)
        def _on_metadata_create(self, target, bind, **kw):
            super(SchemaTypeTest.TrackEvents, self)._on_metadata_create(
                target, bind, **kw
            )
            self.evt_targets += (target,)
    # TODO: Enum and Boolean put TypeEngine first. Changing that here
    # causes collection-mutate-while-iterated errors in the event system
    # since the hooks here call upon the adapted type. Need to figure out
    # why Enum and Boolean don't have this problem.
    class MyType(TrackEvents, sqltypes.SchemaType, sqltypes.TypeEngine):
        pass
    class WrapEnum(TrackEvents, Enum):
        pass
    class WrapBoolean(TrackEvents, Boolean):
        pass
    class MyTypeWImpl(MyType):
        # variant whose per-dialect impl is a distinct adapted type
        def _gen_dialect_impl(self, dialect):
            return self.adapt(SchemaTypeTest.MyTypeImpl)
    class MyTypeImpl(MyTypeWImpl):
        pass
    class MyTypeDecAndSchema(TypeDecorator, sqltypes.SchemaType):
        impl = String()
        cache_ok = True
        evt_targets = ()
        def __init__(self):
            TypeDecorator.__init__(self)
            sqltypes.SchemaType.__init__(self)
        def _on_table_create(self, target, bind, **kw):
            self.evt_targets += (target,)
        def _on_metadata_create(self, target, bind, **kw):
            self.evt_targets += (target,)
    def test_before_parent_attach_plain(self):
        typ = self.MyType()
        self._test_before_parent_attach(typ)
    def test_before_parent_attach_typedec_enclosing_schematype(self):
        # additional test for [ticket:2919] as part of test for
        # [ticket:3832]
        # this also serves as the test for [ticket:6152]
        class MySchemaType(sqltypes.TypeEngine, sqltypes.SchemaType):
            pass
        target_typ = MySchemaType()
        class MyType(TypeDecorator):
            impl = target_typ
            cache_ok = True
        typ = MyType()
        self._test_before_parent_attach(typ, target_typ)
    def test_before_parent_attach_array_enclosing_schematype(self):
        # test for [ticket:4141] which is the same idea as [ticket:3832]
        # for ARRAY
        typ = ARRAY(String)
        self._test_before_parent_attach(typ)
    def test_before_parent_attach_typedec_of_schematype(self):
        class MyType(TypeDecorator, sqltypes.SchemaType):
            impl = String
            cache_ok = True
        typ = MyType()
        self._test_before_parent_attach(typ)
    def test_before_parent_attach_schematype_of_typedec(self):
        class MyType(sqltypes.SchemaType, TypeDecorator):
            impl = String
            cache_ok = True
        typ = MyType()
        self._test_before_parent_attach(typ)
    def test_before_parent_attach_variant_array_schematype(self):
        target = Enum("one", "two", "three")
        typ = ARRAY(target).with_variant(String(), "other")
        self._test_before_parent_attach(typ, evt_target=target)
    def _test_before_parent_attach(self, typ, evt_target=None):
        # patch the attach hooks on evt_target (default: typ itself) and
        # verify that attaching a Column fires before_parent_attach,
        # _set_parent and _set_parent_with_dispatch, in that order
        canary = mock.Mock()
        if evt_target is None:
            evt_target = typ
        orig_set_parent = evt_target._set_parent
        orig_set_parent_w_dispatch = evt_target._set_parent_with_dispatch
        def _set_parent(parent, **kw):
            orig_set_parent(parent, **kw)
            canary._set_parent(parent)
        def _set_parent_w_dispatch(parent):
            orig_set_parent_w_dispatch(parent)
            canary._set_parent_with_dispatch(parent)
        with mock.patch.object(evt_target, "_set_parent", _set_parent):
            with mock.patch.object(
                evt_target, "_set_parent_with_dispatch", _set_parent_w_dispatch
            ):
                event.listen(evt_target, "before_parent_attach", canary.go)
                c = Column("q", typ)
        eq_(
            canary.mock_calls,
            [
                mock.call.go(evt_target, c),
                mock.call._set_parent(c),
                mock.call._set_parent_with_dispatch(c),
            ],
        )
    def test_independent_schema(self):
        # an explicit type schema is kept regardless of the table's schema
        m = MetaData()
        type_ = self.MyType(schema="q")
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "q")
    def test_inherit_schema_from_metadata(self):
        """test #6373"""
        m = MetaData(schema="q")
        type_ = self.MyType(metadata=m)
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "q")
    def test_inherit_schema_from_table_override_metadata(self):
        """test #6373"""
        m = MetaData(schema="q")
        type_ = self.MyType(metadata=m, inherit_schema=True)
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "z")
    def test_inherit_schema_from_metadata_override_explicit(self):
        """test #6373"""
        m = MetaData(schema="q")
        type_ = self.MyType(schema="e", metadata=m)
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "e")
    def test_inherit_schema(self):
        # inherit_schema=True makes the table's schema win over the
        # type's own schema
        m = MetaData()
        type_ = self.MyType(schema="q", inherit_schema=True)
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "z")
    def test_independent_schema_enum(self):
        m = MetaData()
        type_ = sqltypes.Enum("a", schema="q")
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "q")
    def test_inherit_schema_enum(self):
        m = MetaData()
        type_ = sqltypes.Enum("a", "b", "c", schema="q", inherit_schema=True)
        t1 = Table("x", m, Column("y", type_), schema="z")
        eq_(t1.c.y.type.schema, "z")
    def test_to_metadata_copy_type(self):
        m1 = MetaData()
        type_ = self.MyType()
        t1 = Table("x", m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.to_metadata(m2)
        # metadata isn't set
        is_(t2.c.y.type.metadata, None)
        # our test type sets table, though
        is_(t2.c.y.type.table, t2)
    def test_to_metadata_copy_decorated(self):
        class MyDecorated(TypeDecorator):
            impl = self.MyType
            cache_ok = True
        m1 = MetaData()
        type_ = MyDecorated(schema="z")
        t1 = Table("x", m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.to_metadata(m2)
        eq_(t2.c.y.type.schema, "z")
    def test_to_metadata_independent_schema(self):
        m1 = MetaData()
        type_ = self.MyType()
        t1 = Table("x", m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.to_metadata(m2, schema="bar")
        eq_(t2.c.y.type.schema, None)
    def test_to_metadata_inherit_schema(self):
        m1 = MetaData()
        type_ = self.MyType(inherit_schema=True)
        t1 = Table("x", m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.to_metadata(m2, schema="bar")
        eq_(t1.c.y.type.schema, None)
        eq_(t2.c.y.type.schema, "bar")
    def test_to_metadata_independent_events(self):
        # the copied table's type gets its own event state; firing
        # create events on one table does not affect the other
        m1 = MetaData()
        type_ = self.MyType()
        t1 = Table("x", m1, Column("y", type_))
        m2 = MetaData()
        t2 = t1.to_metadata(m2)
        t1.dispatch.before_create(t1, testing.db)
        eq_(t1.c.y.type.evt_targets, (t1,))
        eq_(t2.c.y.type.evt_targets, ())
        t2.dispatch.before_create(t2, testing.db)
        t2.dispatch.before_create(t2, testing.db)
        eq_(t1.c.y.type.evt_targets, (t1,))
        eq_(t2.c.y.type.evt_targets, (t2, t2))
    def test_enum_column_copy_transfers_events(self):
        m = MetaData()
        type_ = self.WrapEnum("a", "b", "c", name="foo")
        y = Column("y", type_)
        y_copy = y._copy()
        t1 = Table("x", m, y_copy)
        is_true(y_copy.type._create_events)
        # for PostgreSQL, this will emit CREATE TYPE
        m.dispatch.before_create(t1, testing.db)
        try:
            eq_(t1.c.y.type.evt_targets, (t1,))
        finally:
            # do the drop so that PostgreSQL emits DROP TYPE
            m.dispatch.after_drop(t1, testing.db)
    def test_enum_nonnative_column_copy_transfers_events(self):
        m = MetaData()
        type_ = self.WrapEnum("a", "b", "c", name="foo", native_enum=False)
        y = Column("y", type_)
        y_copy = y._copy()
        t1 = Table("x", m, y_copy)
        is_true(y_copy.type._create_events)
        m.dispatch.before_create(t1, testing.db)
        eq_(t1.c.y.type.evt_targets, (t1,))
    def test_enum_nonnative_column_copy_transfers_constraintpref(self):
        m = MetaData()
        type_ = self.WrapEnum(
            "a",
            "b",
            "c",
            name="foo",
            native_enum=False,
            create_constraint=False,
        )
        y = Column("y", type_)
        y_copy = y._copy()
        Table("x", m, y_copy)
        is_false(y_copy.type.create_constraint)
    def test_boolean_column_copy_transfers_events(self):
        m = MetaData()
        type_ = self.WrapBoolean()
        y = Column("y", type_)
        y_copy = y._copy()
        Table("x", m, y_copy)
        is_true(y_copy.type._create_events)
    def test_boolean_nonnative_column_copy_transfers_constraintpref(self):
        m = MetaData()
        type_ = self.WrapBoolean(create_constraint=False)
        y = Column("y", type_)
        y_copy = y._copy()
        Table("x", m, y_copy)
        is_false(y_copy.type.create_constraint)
    def test_metadata_dispatch_no_new_impl(self):
        # a metadata-bound type receives metadata create events; its
        # default dialect impl does not share them
        m1 = MetaData()
        typ = self.MyType(metadata=m1)
        m1.dispatch.before_create(m1, testing.db)
        eq_(typ.evt_targets, (m1,))
        dialect_impl = typ.dialect_impl(testing.db.dialect)
        eq_(dialect_impl.evt_targets, ())
    def test_metadata_dispatch_new_impl(self):
        # with a custom _gen_dialect_impl, the adapted impl shares the
        # recorded event targets
        m1 = MetaData()
        typ = self.MyTypeWImpl(metadata=m1)
        m1.dispatch.before_create(m1, testing.db)
        eq_(typ.evt_targets, (m1,))
        dialect_impl = typ.dialect_impl(testing.db.dialect)
        eq_(dialect_impl.evt_targets, (m1,))
    def test_table_dispatch_decorator_schematype(self):
        m1 = MetaData()
        typ = self.MyTypeDecAndSchema()
        t1 = Table("t1", m1, Column("x", typ))
        m1.dispatch.before_create(t1, testing.db)
        eq_(typ.evt_targets, (t1,))
    def test_table_dispatch_no_new_impl(self):
        m1 = MetaData()
        typ = self.MyType()
        t1 = Table("t1", m1, Column("x", typ))
        m1.dispatch.before_create(t1, testing.db)
        eq_(typ.evt_targets, (t1,))
        dialect_impl = typ.dialect_impl(testing.db.dialect)
        eq_(dialect_impl.evt_targets, ())
    def test_table_dispatch_new_impl(self):
        m1 = MetaData()
        typ = self.MyTypeWImpl()
        t1 = Table("t1", m1, Column("x", typ))
        m1.dispatch.before_create(t1, testing.db)
        eq_(typ.evt_targets, (t1,))
        dialect_impl = typ.dialect_impl(testing.db.dialect)
        eq_(dialect_impl.evt_targets, (t1,))
    def test_create_metadata_bound_no_crash(self):
        m1 = MetaData()
        self.MyType(metadata=m1)
        m1.create_all(testing.db)
    def test_boolean_constraint_type_doesnt_double(self):
        # to_metadata must not duplicate the Boolean CHECK constraint
        m1 = MetaData()
        t1 = Table("x", m1, Column("flag", Boolean(create_constraint=True)))
        eq_(
            len([c for c in t1.constraints if isinstance(c, CheckConstraint)]),
            1,
        )
        m2 = MetaData()
        t2 = t1.to_metadata(m2)
        eq_(
            len([c for c in t2.constraints if isinstance(c, CheckConstraint)]),
            1,
        )
    def test_enum_constraint_type_doesnt_double(self):
        # same as above, for the Enum CHECK constraint
        m1 = MetaData()
        t1 = Table(
            "x",
            m1,
            Column("flag", Enum("a", "b", "c", create_constraint=True)),
        )
        eq_(
            len([c for c in t1.constraints if isinstance(c, CheckConstraint)]),
            1,
        )
        m2 = MetaData()
        t2 = t1.to_metadata(m2)
        eq_(
            len([c for c in t2.constraints if isinstance(c, CheckConstraint)]),
            1,
        )
class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for schema-qualified ForeignKey resolution (including the
    MetaData-level default schema) and CREATE/DROP SCHEMA compilation."""
    def test_default_schema_metadata_fk(self):
        # both tables inherit schema "foo" from the MetaData, so the
        # unqualified FK target "t1.x" resolves
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer))
        t2 = Table("t2", m, Column("x", Integer, ForeignKey("t1.x")))
        assert t2.c.x.references(t1.c.x)
    def test_ad_hoc_schema_equiv_fk(self):
        # per-table schema= does NOT qualify the FK target string, so
        # "t1.x" does not find foo.t1
        m = MetaData()
        t1 = Table("t1", m, Column("x", Integer), schema="foo")
        t2 = Table(
            "t2", m, Column("x", Integer, ForeignKey("t1.x")), schema="foo"
        )
        assert_raises(
            exc.NoReferencedTableError, lambda: t2.c.x.references(t1.c.x)
        )
    def test_default_schema_metadata_fk_alt_remote(self):
        # referencing table overrides its schema; the FK target still
        # resolves against the metadata default schema
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer))
        t2 = Table(
            "t2", m, Column("x", Integer, ForeignKey("t1.x")), schema="bar"
        )
        assert t2.c.x.references(t1.c.x)
    def test_default_schema_metadata_fk_alt_local_raises(self):
        # target table lives in "bar" but FK looks in the default "foo"
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer), schema="bar")
        t2 = Table("t2", m, Column("x", Integer, ForeignKey("t1.x")))
        assert_raises(
            exc.NoReferencedTableError, lambda: t2.c.x.references(t1.c.x)
        )
    def test_default_schema_metadata_fk_alt_local(self):
        # qualifying the FK target with its schema fixes the lookup
        m = MetaData(schema="foo")
        t1 = Table("t1", m, Column("x", Integer), schema="bar")
        t2 = Table("t2", m, Column("x", Integer, ForeignKey("bar.t1.x")))
        assert t2.c.x.references(t1.c.x)
    def test_create_drop_schema(self):
        self.assert_compile(
            schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema"
        )
        self.assert_compile(
            schema.DropSchema("sa_schema"), "DROP SCHEMA sa_schema"
        )
        self.assert_compile(
            schema.DropSchema("sa_schema", cascade=True),
            "DROP SCHEMA sa_schema CASCADE",
        )
    def test_iteration(self):
        # CREATE TABLE includes the schema qualifier unless the dialect's
        # preparer omits schemas
        metadata = MetaData()
        table1 = Table(
            "table1",
            metadata,
            Column("col1", Integer, primary_key=True),
            schema="someschema",
        )
        table2 = Table(
            "table2",
            metadata,
            Column("col1", Integer, primary_key=True),
            Column("col2", Integer, ForeignKey("someschema.table1.col1")),
            schema="someschema",
        )
        t1 = str(schema.CreateTable(table1).compile(bind=testing.db))
        t2 = str(schema.CreateTable(table2).compile(bind=testing.db))
        if testing.db.dialect.preparer(testing.db.dialect).omit_schema:
            assert t1.index("CREATE TABLE table1") > -1
            assert t2.index("CREATE TABLE table2") > -1
        else:
            assert t1.index("CREATE TABLE someschema.table1") > -1
            assert t2.index("CREATE TABLE someschema.table2") > -1
class UseExistingTest(fixtures.TablesTest):
    """Tests for re-declaring an already-defined Table with the
    ``keep_existing`` / ``extend_existing`` flags, with and without
    reflection (``autoload_with``)."""
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "users",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("name", String(30)),
        )
    @testing.fixture
    def existing_meta(self):
        # MetaData that already holds a reflected "users" table
        meta2 = MetaData()
        Table("users", meta2, autoload_with=testing.db)
        return meta2
    @testing.fixture
    def empty_meta(self):
        return MetaData()
    def test_exception_no_flags(self, existing_meta):
        # redefining without keep_existing/extend_existing raises
        def go():
            Table(
                "users",
                existing_meta,
                Column("name", Unicode),
                autoload_with=testing.db,
            )
        assert_raises_message(
            exc.InvalidRequestError,
            "Table 'users' is already defined for this " "MetaData instance.",
            go,
        )
    def test_keep_plus_existing_raises(self, existing_meta):
        # the two flags are mutually exclusive
        assert_raises(
            exc.ArgumentError,
            Table,
            "users",
            existing_meta,
            keep_existing=True,
            extend_existing=True,
        )
    def test_keep_existing_no_dupe_constraints(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            Column("id", Integer),
            Column("name", Unicode),
            UniqueConstraint("name"),
            keep_existing=True,
        )
        assert "name" in users.c
        assert "id" in users.c
        # PK constraint + the unique constraint
        eq_(len(users.constraints), 2)
        u2 = Table(
            "users",
            empty_meta,
            Column("id", Integer),
            Column("name", Unicode),
            UniqueConstraint("name"),
            keep_existing=True,
        )
        eq_(len(u2.constraints), 2)
    def test_extend_existing_dupes_constraints(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            Column("id", Integer),
            Column("name", Unicode),
            UniqueConstraint("name"),
            extend_existing=True,
        )
        assert "name" in users.c
        assert "id" in users.c
        eq_(len(users.constraints), 2)
        u2 = Table(
            "users",
            empty_meta,
            Column("id", Integer),
            Column("name", Unicode),
            UniqueConstraint("name"),
            extend_existing=True,
        )
        # constraint got duped
        eq_(len(u2.constraints), 3)
    def test_autoload_replace_column(self, empty_meta):
        # an explicitly-passed column overrides the reflected one
        users = Table(
            "users",
            empty_meta,
            Column("name", Unicode),
            autoload_with=testing.db,
        )
        assert isinstance(users.c.name.type, Unicode)
    def test_keep_existing_coltype(self, existing_meta):
        # keep_existing: the previously-reflected column type wins
        users = Table(
            "users",
            existing_meta,
            Column("name", Unicode),
            autoload_with=testing.db,
            keep_existing=True,
        )
        assert not isinstance(users.c.name.type, Unicode)
    def test_keep_existing_quote(self, existing_meta):
        users = Table(
            "users",
            existing_meta,
            quote=True,
            autoload_with=testing.db,
            keep_existing=True,
        )
        assert not users.name.quote
    def test_keep_existing_add_column(self, existing_meta):
        users = Table(
            "users",
            existing_meta,
            Column("foo", Integer),
            autoload_with=testing.db,
            keep_existing=True,
        )
        assert "foo" not in users.c
    def test_keep_existing_coltype_no_orig(self, empty_meta):
        # with no pre-existing table, keep_existing has nothing to keep:
        # the new declaration applies
        users = Table(
            "users",
            empty_meta,
            Column("name", Unicode),
            autoload_with=testing.db,
            keep_existing=True,
        )
        assert isinstance(users.c.name.type, Unicode)
    @testing.skip_if(
        lambda: testing.db.dialect.requires_name_normalize,
        "test depends on lowercase as case insensitive",
    )
    def test_keep_existing_quote_no_orig(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            quote=True,
            autoload_with=testing.db,
            keep_existing=True,
        )
        assert users.name.quote
    def test_keep_existing_add_column_no_orig(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            Column("foo", Integer),
            autoload_with=testing.db,
            keep_existing=True,
        )
        assert "foo" in users.c
    def test_keep_existing_coltype_no_reflection(self, existing_meta):
        users = Table(
            "users", existing_meta, Column("name", Unicode), keep_existing=True
        )
        assert not isinstance(users.c.name.type, Unicode)
    def test_keep_existing_quote_no_reflection(self, existing_meta):
        users = Table("users", existing_meta, quote=True, keep_existing=True)
        assert not users.name.quote
    def test_keep_existing_add_column_no_reflection(self, existing_meta):
        users = Table(
            "users", existing_meta, Column("foo", Integer), keep_existing=True
        )
        assert "foo" not in users.c
    def test_extend_existing_coltype(self, existing_meta):
        # extend_existing: the new declaration overrides the reflected one
        users = Table(
            "users",
            existing_meta,
            Column("name", Unicode),
            autoload_with=testing.db,
            extend_existing=True,
        )
        assert isinstance(users.c.name.type, Unicode)
    def test_extend_existing_quote(self, existing_meta):
        # quote/quote_schema cannot be changed on an existing table
        assert_raises_message(
            tsa.exc.ArgumentError,
            "Can't redefine 'quote' or 'quote_schema' arguments",
            Table,
            "users",
            existing_meta,
            quote=True,
            autoload_with=testing.db,
            extend_existing=True,
        )
    def test_extend_existing_add_column(self, existing_meta):
        users = Table(
            "users",
            existing_meta,
            Column("foo", Integer),
            autoload_with=testing.db,
            extend_existing=True,
        )
        assert "foo" in users.c
    def test_extend_existing_coltype_no_orig(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            Column("name", Unicode),
            autoload_with=testing.db,
            extend_existing=True,
        )
        assert isinstance(users.c.name.type, Unicode)
    @testing.skip_if(
        lambda: testing.db.dialect.requires_name_normalize,
        "test depends on lowercase as case insensitive",
    )
    def test_extend_existing_quote_no_orig(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            quote=True,
            autoload_with=testing.db,
            extend_existing=True,
        )
        assert users.name.quote
    def test_extend_existing_add_column_no_orig(self, empty_meta):
        users = Table(
            "users",
            empty_meta,
            Column("foo", Integer),
            autoload_with=testing.db,
            extend_existing=True,
        )
        assert "foo" in users.c
    def test_extend_existing_coltype_no_reflection(self, existing_meta):
        users = Table(
            "users",
            existing_meta,
            Column("name", Unicode),
            extend_existing=True,
        )
        assert isinstance(users.c.name.type, Unicode)
    def test_extend_existing_quote_no_reflection(self, existing_meta):
        assert_raises_message(
            tsa.exc.ArgumentError,
            "Can't redefine 'quote' or 'quote_schema' arguments",
            Table,
            "users",
            existing_meta,
            quote=True,
            extend_existing=True,
        )
    def test_extend_existing_add_column_no_reflection(self, existing_meta):
        users = Table(
            "users",
            existing_meta,
            Column("foo", Integer),
            extend_existing=True,
        )
        assert "foo" in users.c
class ConstraintTest(fixtures.TestBase):
    def _single_fixture(self):
        # t2.a carries an FK to t1.a; t3 is unrelated to either
        m = MetaData()
        t1 = Table("t1", m, Column("a", Integer), Column("b", Integer))
        t2 = Table("t2", m, Column("a", Integer, ForeignKey("t1.a")))
        t3 = Table("t3", m, Column("a", Integer))
        return t1, t2, t3
    def _assert_index_col_x(self, t, i, columns=True):
        # assert i is the sole index on t, attached to t, and — when
        # columns=True — that it resolved its column list to [t.c.x]
        eq_(t.indexes, set([i]))
        if columns:
            eq_(list(i.columns), [t.c.x])
        else:
            eq_(list(i.columns), [])
        assert i.table is t
def test_separate_decl_columns(self):
m = MetaData()
t = Table("t", m, Column("x", Integer))
i = Index("i", t.c.x)
self._assert_index_col_x(t, i)
    def test_separate_decl_columns_functional(self):
        # a functional expression over t.c.x still attaches the index to
        # t and extracts x as its column
        m = MetaData()
        t = Table("t", m, Column("x", Integer))
        i = Index("i", func.foo(t.c.x))
        self._assert_index_col_x(t, i)
    def test_index_no_cols_private_table_arg(self):
        # the private _table argument binds the index with no columns
        m = MetaData()
        t = Table("t", m, Column("x", Integer))
        i = Index("i", _table=t)
        is_(i.table, t)
        eq_(list(i.columns), [])
    def test_index_w_cols_private_table_arg(self):
        # _table plus explicit columns: both are applied
        m = MetaData()
        t = Table("t", m, Column("x", Integer))
        i = Index("i", t.c.x, _table=t)
        is_(i.table, t)
        eq_(list(i.columns), [t.c.x])
    def test_inline_decl_columns(self):
        # Index built on a not-yet-bound Column, then both passed inline
        # to Table() — the index attaches when the column does
        m = MetaData()
        c = Column("x", Integer)
        i = Index("i", c)
        t = Table("t", m, c, i)
        self._assert_index_col_x(t, i)
    def test_inline_decl_columns_functional(self):
        # same as above, with the column wrapped in a SQL function
        m = MetaData()
        c = Column("x", Integer)
        i = Index("i", func.foo(c))
        t = Table("t", m, c, i)
        self._assert_index_col_x(t, i)
def test_inline_decl_string(self):
m = MetaData()
i = Index("i", "x")
t = Table("t", m, Column("x", Integer), i)
self._assert_index_col_x(t, i)
    def test_inline_decl_textonly(self):
        # a text()-only expression attaches the index but yields no
        # resolvable columns
        m = MetaData()
        i = Index("i", text("foobar(x)"))
        t = Table("t", m, Column("x", Integer), i)
        self._assert_index_col_x(t, i, columns=False)
    def test_separate_decl_textonly(self):
        # same as the inline case, attached via append_constraint()
        m = MetaData()
        i = Index("i", text("foobar(x)"))
        t = Table("t", m, Column("x", Integer))
        t.append_constraint(i)
        self._assert_index_col_x(t, i, columns=False)
    def test_unnamed_column_exception(self):
        # this can occur in some declarative situations
        c = Column(Integer)
        idx = Index("q", c)
        m = MetaData()
        t = Table("t", m, Column("q"))
        # attaching the index drags in its (nameless) column -> error
        assert_raises_message(
            exc.ArgumentError,
            "Can't add unnamed column to column collection",
            t.append_constraint,
            idx,
        )
    def test_non_attached_col_plus_string_expr(self):
        # another one that declarative can lead towards
        metadata = MetaData()
        t1 = Table("a", metadata, Column("id", Integer))
        c2 = Column("x", Integer)
        # if we do it here, no problem
        # t1.append_column(c2)
        idx = Index("foo", c2, desc("foo"))
        t1.append_column(c2)
        # index attaches once the column joins the table
        self._assert_index_col_x(t1, idx, columns=True)
    def test_column_associated_w_lowercase_table(self):
        from sqlalchemy import table
        c = Column("x", Integer)
        table("foo", c)
        idx = Index("q", c)
        is_(idx.table, None)  # lower-case-T table doesn't have indexes
    def test_clauseelement_extraction_one(self):
        # an Index argument supplying __clause_element__ is unwrapped
        # and its column extracted
        t = Table("t", MetaData(), Column("x", Integer), Column("y", Integer))
        class MyThing(object):
            def __clause_element__(self):
                return t.c.x + 5
        idx = Index("foo", MyThing())
        self._assert_index_col_x(t, idx)
    def test_clauseelement_extraction_two(self):
        # mixed __clause_element__ object and plain column
        t = Table("t", MetaData(), Column("x", Integer), Column("y", Integer))
        class MyThing(object):
            def __clause_element__(self):
                return t.c.x + 5
        idx = Index("bar", MyThing(), t.c.y)
        eq_(set(t.indexes), set([idx]))
    def test_clauseelement_extraction_three(self):
        # the unwrapped expression is kept in idx.expressions as-is
        t = Table("t", MetaData(), Column("x", Integer), Column("y", Integer))
        expr1 = t.c.x + 5
        class MyThing(object):
            def __clause_element__(self):
                return expr1
        idx = Index("bar", MyThing(), t.c.y)
        is_true(idx.expressions[0].compare(expr1))
        is_(idx.expressions[1], t.c.y)
def test_table_references(self):
t1, t2, t3 = self._single_fixture()
assert list(t2.c.a.foreign_keys)[0].references(t1)
assert not list(t2.c.a.foreign_keys)[0].references(t3)
def test_column_references(self):
t1, t2, t3 = self._single_fixture()
assert t2.c.a.references(t1.c.a)
assert not t2.c.a.references(t3.c.a)
assert not t2.c.a.references(t1.c.b)
    def test_column_references_derived(self):
        # references() follows proxy columns of a derived subquery
        t1, t2, t3 = self._single_fixture()
        s1 = tsa.select(tsa.select(t1).alias()).subquery()
        assert t2.c.a.references(s1.c.a)
        assert not t2.c.a.references(s1.c.b)
def test_copy_doesnt_reference(self):
t1, t2, t3 = self._single_fixture()
a2 = t2.c.a._copy()
assert not a2.references(t1.c.a)
assert not a2.references(t1.c.b)
    def test_derived_column_references(self):
        # the FK also propagates to proxy columns on the referencing side
        t1, t2, t3 = self._single_fixture()
        s1 = tsa.select(tsa.select(t2).alias()).subquery()
        assert s1.c.a.references(t1.c.a)
        assert not s1.c.a.references(t1.c.b)
def test_referred_table_accessor(self):
t1, t2, t3 = self._single_fixture()
fkc = list(t2.foreign_key_constraints)[0]
is_(fkc.referred_table, t1)
    def test_referred_table_accessor_not_available(self):
        # referred_table raises when the target table was never defined
        t1 = Table("t", MetaData(), Column("x", ForeignKey("q.id")))
        fkc = list(t1.foreign_key_constraints)[0]
        assert_raises_message(
            exc.InvalidRequestError,
            "Foreign key associated with column 't.x' could not find "
            "table 'q' with which to generate a foreign key to target "
            "column 'id'",
            getattr,
            fkc,
            "referred_table",
        )
    def test_related_column_not_present_atfirst_ok(self):
        # the FK target column may be appended to its table after the
        # referencing table is defined; resolution happens lazily
        m = MetaData()
        base_table = Table("base", m, Column("id", Integer, primary_key=True))
        fk = ForeignKey("base.q")
        derived_table = Table(
            "derived", m, Column("id", None, fk, primary_key=True)
        )
        base_table.append_column(Column("q", Integer))
        assert fk.column is base_table.c.q
        # the None type on derived.id is filled in from the target
        assert isinstance(derived_table.c.id.type, Integer)
    def test_related_column_not_present_atfirst_ok_onname(self):
        # link_to_name=True matches the column's DB name ("q") even when
        # its Python key differs ("zz")
        m = MetaData()
        base_table = Table("base", m, Column("id", Integer, primary_key=True))
        fk = ForeignKey("base.q", link_to_name=True)
        derived_table = Table(
            "derived", m, Column("id", None, fk, primary_key=True)
        )
        base_table.append_column(Column("q", Integer, key="zz"))
        assert fk.column is base_table.c.zz
        assert isinstance(derived_table.c.id.type, Integer)
    def test_related_column_not_present_atfirst_ok_linktoname_conflict(self):
        # with link_to_name=True, matching is strictly by DB name "q",
        # even though another column uses "q" as its key
        m = MetaData()
        base_table = Table("base", m, Column("id", Integer, primary_key=True))
        fk = ForeignKey("base.q", link_to_name=True)
        derived_table = Table(
            "derived", m, Column("id", None, fk, primary_key=True)
        )
        base_table.append_column(Column("zz", Integer, key="q"))
        base_table.append_column(Column("q", Integer, key="zz"))
        assert fk.column is base_table.c.zz
        assert isinstance(derived_table.c.id.type, Integer)
    def test_invalid_composite_fk_check_strings(self):
        # a composite FK must target a single remote table (string form)
        m = MetaData()
        assert_raises_message(
            exc.ArgumentError,
            r"ForeignKeyConstraint on t1\(x, y\) refers to "
            "multiple remote tables: t2 and t3",
            Table,
            "t1",
            m,
            Column("x", Integer),
            Column("y", Integer),
            ForeignKeyConstraint(["x", "y"], ["t2.x", "t3.y"]),
        )
    def test_invalid_composite_fk_check_columns(self):
        # same validation when the targets are Column objects
        m = MetaData()
        t2 = Table("t2", m, Column("x", Integer))
        t3 = Table("t3", m, Column("y", Integer))
        assert_raises_message(
            exc.ArgumentError,
            r"ForeignKeyConstraint on t1\(x, y\) refers to "
            "multiple remote tables: t2 and t3",
            Table,
            "t1",
            m,
            Column("x", Integer),
            Column("y", Integer),
            ForeignKeyConstraint(["x", "y"], [t2.c.x, t3.c.y]),
        )
    def test_invalid_composite_fk_check_columns_notattached(self):
        m = MetaData()
        x = Column("x", Integer)
        y = Column("y", Integer)
        # no error is raised for this one right now.
        # which is a minor bug.
        Table(
            "t1",
            m,
            Column("x", Integer),
            Column("y", Integer),
            ForeignKeyConstraint(["x", "y"], [x, y]),
        )
        Table("t2", m, x)
        Table("t3", m, y)
    def test_constraint_copied_to_proxy_ok(self):
        m = MetaData()
        Table("t1", m, Column("id", Integer, primary_key=True))
        t2 = Table(
            "t2",
            m,
            Column("id", Integer, ForeignKey("t1.id"), primary_key=True),
        )
        s = tsa.select(t2).subquery()
        t2fk = list(t2.c.id.foreign_keys)[0]
        sfk = list(s.c.id.foreign_keys)[0]
        # the two FKs share the ForeignKeyConstraint
        is_(t2fk.constraint, sfk.constraint)
        # but the ForeignKeyConstraint isn't
        # aware of the select's FK
        eq_(t2fk.constraint.elements, [t2fk])
    def test_type_propagate_composite_fk_string(self):
        # None-typed columns of a composite FK receive the types of
        # their (string-named) targets
        metadata = MetaData()
        Table(
            "a",
            metadata,
            Column("key1", Integer, primary_key=True),
            Column("key2", String(40), primary_key=True),
        )
        b = Table(
            "b",
            metadata,
            Column("a_key1", None),
            Column("a_key2", None),
            Column("id", Integer, primary_key=True),
            ForeignKeyConstraint(["a_key1", "a_key2"], ["a.key1", "a.key2"]),
        )
        assert isinstance(b.c.a_key1.type, Integer)
        assert isinstance(b.c.a_key2.type, String)
    def test_type_propagate_composite_fk_col(self):
        # same as the string case, targeting Column objects directly
        metadata = MetaData()
        a = Table(
            "a",
            metadata,
            Column("key1", Integer, primary_key=True),
            Column("key2", String(40), primary_key=True),
        )
        b = Table(
            "b",
            metadata,
            Column("a_key1", None),
            Column("a_key2", None),
            Column("id", Integer, primary_key=True),
            ForeignKeyConstraint(["a_key1", "a_key2"], [a.c.key1, a.c.key2]),
        )
        assert isinstance(b.c.a_key1.type, Integer)
        assert isinstance(b.c.a_key2.type, String)
    def test_type_propagate_standalone_fk_string(self):
        # a single inline ForeignKey propagates the target's type
        metadata = MetaData()
        Table("a", metadata, Column("key1", Integer, primary_key=True))
        b = Table("b", metadata, Column("a_key1", None, ForeignKey("a.key1")))
        assert isinstance(b.c.a_key1.type, Integer)
    def test_type_propagate_standalone_fk_col(self):
        # same, with a Column object as the FK target
        metadata = MetaData()
        a = Table("a", metadata, Column("key1", Integer, primary_key=True))
        b = Table("b", metadata, Column("a_key1", None, ForeignKey(a.c.key1)))
        assert isinstance(b.c.a_key1.type, Integer)
    def test_type_propagate_chained_string_source_first(self):
        # type propagates across a two-hop FK chain: c -> b -> a
        metadata = MetaData()
        Table("a", metadata, Column("key1", Integer, primary_key=True))
        b = Table("b", metadata, Column("a_key1", None, ForeignKey("a.key1")))
        c = Table(
            "c", metadata, Column("b_key1", None, ForeignKey("b.a_key1"))
        )
        assert isinstance(b.c.a_key1.type, Integer)
        assert isinstance(c.c.b_key1.type, Integer)
    def test_type_propagate_chained_string_source_last(self):
        # same chain, but the root table is defined last — resolution
        # still succeeds once "a" appears
        metadata = MetaData()
        b = Table("b", metadata, Column("a_key1", None, ForeignKey("a.key1")))
        c = Table(
            "c", metadata, Column("b_key1", None, ForeignKey("b.a_key1"))
        )
        Table("a", metadata, Column("key1", Integer, primary_key=True))
        assert isinstance(b.c.a_key1.type, Integer)
        assert isinstance(c.c.b_key1.type, Integer)
    def test_type_propagate_chained_string_source_last_onname(self):
        # chained propagation with link_to_name=True and divergent
        # key= values on every hop
        metadata = MetaData()
        b = Table(
            "b",
            metadata,
            Column(
                "a_key1",
                None,
                ForeignKey("a.key1", link_to_name=True),
                key="ak1",
            ),
        )
        c = Table(
            "c",
            metadata,
            Column(
                "b_key1",
                None,
                ForeignKey("b.a_key1", link_to_name=True),
                key="bk1",
            ),
        )
        Table(
            "a", metadata, Column("key1", Integer, primary_key=True, key="ak1")
        )
        assert isinstance(b.c.ak1.type, Integer)
        assert isinstance(c.c.bk1.type, Integer)
    def test_type_propagate_chained_string_source_last_onname_conflict(self):
        # link_to_name decides whether "a.key1" / "b.ak1" match by DB
        # name or by key when both a name and a key collide
        metadata = MetaData()
        b = Table(
            "b",
            metadata,
            # b.c.key1 -> a.c.key1 -> String
            Column(
                "ak1",
                None,
                ForeignKey("a.key1", link_to_name=False),
                key="key1",
            ),
            # b.c.ak1 -> a.c.ak1 -> Integer
            Column(
                "a_key1",
                None,
                ForeignKey("a.key1", link_to_name=True),
                key="ak1",
            ),
        )
        c = Table(
            "c",
            metadata,
            # c.c.b_key1 -> b.c.ak1 -> Integer
            Column("b_key1", None, ForeignKey("b.ak1", link_to_name=False)),
            # c.c.b_ak1 -> b.c.ak1
            Column("b_ak1", None, ForeignKey("b.ak1", link_to_name=True)),
        )
        Table(
            "a",
            metadata,
            # a.c.key1
            Column("ak1", String, key="key1"),
            # a.c.ak1
            Column("key1", Integer, primary_key=True, key="ak1"),
        )
        assert isinstance(b.c.key1.type, String)
        assert isinstance(b.c.ak1.type, Integer)
        assert isinstance(c.c.b_ak1.type, String)
        assert isinstance(c.c.b_key1.type, Integer)
    def test_type_propagate_chained_col_orig_first(self):
        # two-hop chain built from Column objects rather than strings
        metadata = MetaData()
        a = Table("a", metadata, Column("key1", Integer, primary_key=True))
        b = Table("b", metadata, Column("a_key1", None, ForeignKey(a.c.key1)))
        c = Table(
            "c", metadata, Column("b_key1", None, ForeignKey(b.c.a_key1))
        )
        assert isinstance(b.c.a_key1.type, Integer)
        assert isinstance(c.c.b_key1.type, Integer)
def test_column_accessor_col(self):
c1 = Column("x", Integer)
fk = ForeignKey(c1)
is_(fk.column, c1)
def test_column_accessor_clause_element(self):
c1 = Column("x", Integer)
class CThing(object):
def __init__(self, c):
self.c = c
def __clause_element__(self):
return self.c
fk = ForeignKey(CThing(c1))
is_(fk.column, c1)
def test_column_accessor_string_no_parent(self):
fk = ForeignKey("sometable.somecol")
assert_raises_message(
exc.InvalidRequestError,
"this ForeignKey object does not yet have a parent "
"Column associated with it.",
getattr,
fk,
"column",
)
def test_column_accessor_string_no_parent_table(self):
fk = ForeignKey("sometable.somecol")
Column("x", fk)
assert_raises_message(
exc.InvalidRequestError,
"this ForeignKey's parent column is not yet "
"associated with a Table.",
getattr,
fk,
"column",
)
def test_column_accessor_string_no_target_table(self):
fk = ForeignKey("sometable.somecol")
c1 = Column("x", fk)
Table("t", MetaData(), c1)
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't.x' could not find "
"table 'sometable' with which to generate a "
"foreign key to target column 'somecol'",
getattr,
fk,
"column",
)
def test_column_accessor_string_no_target_column(self):
fk = ForeignKey("sometable.somecol")
c1 = Column("x", fk)
m = MetaData()
Table("t", m, c1)
Table("sometable", m, Column("notsomecol", Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey "
"'sometable.somecol' on table 't': "
"table 'sometable' has no column named 'somecol'",
getattr,
fk,
"column",
)
    def test_remove_table_fk_bookkeeping(self):
        """MetaData.remove() detaches a removed table's FK from the
        _fk_memos collection so it can never resolve, while FKs on
        other tables keep working; a second remove() is a no-op."""
        metadata = MetaData()
        fk = ForeignKey("t1.x")
        t2 = Table("t2", metadata, Column("y", Integer, fk))
        t3 = Table("t3", metadata, Column("y", Integer, ForeignKey("t1.x")))
        assert t2.key in metadata.tables
        assert ("t1", "x") in metadata._fk_memos
        metadata.remove(t2)
        # key is removed
        assert t2.key not in metadata.tables
        # the memo for the FK is still there
        assert ("t1", "x") in metadata._fk_memos
        # fk is not in the collection
        assert fk not in metadata._fk_memos[("t1", "x")]
        # make the referenced table
        t1 = Table("t1", metadata, Column("x", Integer))
        # t2 tells us exactly what's wrong
        assert_raises_message(
            exc.InvalidRequestError,
            "Table t2 is no longer associated with its parent MetaData",
            getattr,
            fk,
            "column",
        )
        # t3 is unaffected
        assert t3.c.y.references(t1.c.x)
        # remove twice OK
        metadata.remove(t2)
def test_double_fk_usage_raises(self):
f = ForeignKey("b.id")
Column("x", Integer, f)
assert_raises(exc.InvalidRequestError, Column, "y", Integer, f)
def test_auto_append_constraint(self):
m = MetaData()
t = Table("tbl", m, Column("a", Integer), Column("b", Integer))
t2 = Table("t2", m, Column("a", Integer), Column("b", Integer))
for c in (
UniqueConstraint(t.c.a),
CheckConstraint(t.c.a > 5),
ForeignKeyConstraint([t.c.a], [t2.c.a]),
PrimaryKeyConstraint(t.c.a),
):
assert c in t.constraints
t.append_constraint(c)
assert c in t.constraints
c = Index("foo", t.c.a)
assert c in t.indexes
    def test_auto_append_lowercase_table(self):
        """Constructing constraints against lightweight ``table()`` /
        ``column()`` objects must not raise; lightweight tables carry no
        ``.constraints`` collection, so success here is simply that each
        construction completed."""
        from sqlalchemy import table, column
        t = table("t", column("a"))
        t2 = table("t2", column("a"))
        for c in (
            UniqueConstraint(t.c.a),
            CheckConstraint(t.c.a > 5),
            ForeignKeyConstraint([t.c.a], [t2.c.a]),
            PrimaryKeyConstraint(t.c.a),
            Index("foo", t.c.a),
        ):
            # nothing to inspect: passing means construction didn't raise
            assert True
def test_to_metadata_ok(self):
m = MetaData()
t = Table("tbl", m, Column("a", Integer), Column("b", Integer))
t2 = Table("t2", m, Column("a", Integer), Column("b", Integer))
UniqueConstraint(t.c.a)
CheckConstraint(t.c.a > 5)
ForeignKeyConstraint([t.c.a], [t2.c.a])
PrimaryKeyConstraint(t.c.a)
m2 = MetaData()
t3 = t.to_metadata(m2)
eq_(len(t3.constraints), 4)
for c in t3.constraints:
assert c.table is t3
def test_check_constraint_copy(self):
m = MetaData()
t = Table("tbl", m, Column("a", Integer), Column("b", Integer))
ck = CheckConstraint(t.c.a > 5)
ck2 = ck._copy()
assert ck in t.constraints
assert ck2 not in t.constraints
def test_ambig_check_constraint_auto_append(self):
m = MetaData()
t = Table("tbl", m, Column("a", Integer), Column("b", Integer))
t2 = Table("t2", m, Column("a", Integer), Column("b", Integer))
c = CheckConstraint(t.c.a > t2.c.b)
assert c not in t.constraints
assert c not in t2.constraints
def test_auto_append_ck_on_col_attach_one(self):
m = MetaData()
a = Column("a", Integer)
b = Column("b", Integer)
ck = CheckConstraint(a > b)
t = Table("tbl", m, a, b)
assert ck in t.constraints
def test_auto_append_ck_on_col_attach_two(self):
m = MetaData()
a = Column("a", Integer)
b = Column("b", Integer)
c = Column("c", Integer)
ck = CheckConstraint(a > b + c)
t = Table("tbl", m, a)
assert ck not in t.constraints
t.append_column(b)
assert ck not in t.constraints
t.append_column(c)
assert ck in t.constraints
    def test_auto_append_ck_on_col_attach_three(self):
        """A CK whose columns end up split across two different tables
        silently attaches to neither (contrast with UniqueConstraint,
        which raises in the same situation)."""
        m = MetaData()
        a = Column("a", Integer)
        b = Column("b", Integer)
        c = Column("c", Integer)
        ck = CheckConstraint(a > b + c)
        t = Table("tbl", m, a)
        assert ck not in t.constraints
        t.append_column(b)
        assert ck not in t.constraints
        t2 = Table("t2", m)
        t2.append_column(c)
        # two different tables, so CheckConstraint does nothing.
        assert ck not in t.constraints
def test_auto_append_uq_on_col_attach_one(self):
m = MetaData()
a = Column("a", Integer)
b = Column("b", Integer)
uq = UniqueConstraint(a, b)
t = Table("tbl", m, a, b)
assert uq in t.constraints
def test_auto_append_uq_on_col_attach_two(self):
m = MetaData()
a = Column("a", Integer)
b = Column("b", Integer)
c = Column("c", Integer)
uq = UniqueConstraint(a, b, c)
t = Table("tbl", m, a)
assert uq not in t.constraints
t.append_column(b)
assert uq not in t.constraints
t.append_column(c)
assert uq in t.constraints
    def test_auto_append_uq_on_col_attach_three(self):
        """Unlike CheckConstraint, a UniqueConstraint whose columns
        land on two different tables raises at the moment the stray
        column is attached."""
        m = MetaData()
        a = Column("a", Integer)
        b = Column("b", Integer)
        c = Column("c", Integer)
        uq = UniqueConstraint(a, b, c)
        t = Table("tbl", m, a)
        assert uq not in t.constraints
        t.append_column(b)
        assert uq not in t.constraints
        t2 = Table("t2", m)
        # two different tables, so UniqueConstraint raises
        assert_raises_message(
            exc.ArgumentError,
            r"Column\(s\) 't2\.c' are not part of table 'tbl'\.",
            t2.append_column,
            c,
        )
    def test_auto_append_uq_on_col_attach_four(self):
        """Test that a uniqueconstraint that names Column and string names
        won't autoattach using deferred column attachment.
        """
        m = MetaData()
        a = Column("a", Integer)
        b = Column("b", Integer)
        c = Column("c", Integer)
        uq = UniqueConstraint(a, "b", "c")
        t = Table("tbl", m, a)
        assert uq not in t.constraints
        t.append_column(b)
        assert uq not in t.constraints
        t.append_column(c)
        # we don't track events for previously unknown columns
        # named 'c' to be attached
        assert uq not in t.constraints
        # an explicit append still works, and only adds it once
        t.append_constraint(uq)
        assert uq in t.constraints
        eq_(
            [cn for cn in t.constraints if isinstance(cn, UniqueConstraint)],
            [uq],
        )
    def test_auto_append_uq_on_col_attach_five(self):
        """Test that a uniqueconstraint that names Column and string names
        *will* autoattach if the table has all those names up front.
        """
        m = MetaData()
        a = Column("a", Integer)
        b = Column("b", Integer)
        c = Column("c", Integer)
        t = Table("tbl", m, a, c, b)
        uq = UniqueConstraint(a, "b", "c")
        assert uq in t.constraints
        # a redundant explicit append does not create a duplicate
        t.append_constraint(uq)
        assert uq in t.constraints
        eq_(
            [cn for cn in t.constraints if isinstance(cn, UniqueConstraint)],
            [uq],
        )
def test_index_asserts_cols_standalone(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
t2 = Table("t2", metadata, Column("y", Integer))
assert_raises_message(
exc.ArgumentError,
r"Column\(s\) 't2.y' are not part of table 't1'.",
Index,
"bar",
t1.c.x,
t2.c.y,
)
def test_index_asserts_cols_inline(self):
metadata = MetaData()
t1 = Table("t1", metadata, Column("x", Integer))
assert_raises_message(
exc.ArgumentError,
"Index 'bar' is against table 't1', and "
"cannot be associated with table 't2'.",
Table,
"t2",
metadata,
Column("y", Integer),
Index("bar", t1.c.x),
)
def test_raise_index_nonexistent_name(self):
m = MetaData()
# the KeyError isn't ideal here, a nicer message
# perhaps
assert_raises(
KeyError, Table, "t", m, Column("x", Integer), Index("foo", "q")
)
def test_raise_not_a_column(self):
assert_raises(exc.ArgumentError, Index, "foo", 5)
def test_raise_expr_no_column(self):
idx = Index("foo", func.lower(5))
assert_raises_message(
exc.CompileError,
"Index 'foo' is not associated with any table.",
schema.CreateIndex(idx).compile,
dialect=testing.db.dialect,
)
assert_raises_message(
exc.CompileError,
"Index 'foo' is not associated with any table.",
schema.CreateIndex(idx).compile,
)
def test_no_warning_w_no_columns(self):
idx = Index(name="foo")
assert_raises_message(
exc.CompileError,
"Index 'foo' is not associated with any table.",
schema.CreateIndex(idx).compile,
dialect=testing.db.dialect,
)
assert_raises_message(
exc.CompileError,
"Index 'foo' is not associated with any table.",
schema.CreateIndex(idx).compile,
)
def test_raise_clauseelement_not_a_column(self):
m = MetaData()
t2 = Table("t2", m, Column("x", Integer))
class SomeClass(object):
def __clause_element__(self):
return t2
assert_raises_message(
exc.ArgumentError,
r"String column name or column expression for DDL constraint "
r"expected, got .*SomeClass",
Index,
"foo",
SomeClass(),
)
    @testing.fixture
    def no_pickle_annotated(self):
        """Fixture: a Table ``t`` plus a ColumnOperators wrapper around
        ``t.c.q`` annotated with an object whose __reduce__ raises, so
        any pickle of the metadata proves the deannotated element is
        what gets serialized."""
        class NoPickle(object):
            def __reduce__(self):
                # pickling this object must never be attempted
                raise NotImplementedError()
        class ClauseElement(operators.ColumnOperators):
            def __init__(self, col):
                self.col = col._annotate({"bar": NoPickle()})
            def __clause_element__(self):
                return self.col
            def operate(self, op, *other, **kwargs):
                # delegate operators (e.g. q_col > 5) to the annotated col
                return self.col.operate(op, *other, **kwargs)
        m = MetaData()
        t = Table("t", m, Column("q", Integer))
        return t, ClauseElement(t.c.q)
def test_pickle_fk_annotated_col(self, no_pickle_annotated):
t, q_col = no_pickle_annotated
t2 = Table("t2", t.metadata, Column("p", ForeignKey(q_col)))
assert t2.c.p.references(t.c.q)
m2 = pickle.loads(pickle.dumps(t.metadata))
m2_t, m2_t2 = m2.tables["t"], m2.tables["t2"]
is_true(m2_t2.c.p.references(m2_t.c.q))
def test_pickle_uq_annotated_col(self, no_pickle_annotated):
t, q_col = no_pickle_annotated
t.append_constraint(UniqueConstraint(q_col))
m2 = pickle.loads(pickle.dumps(t.metadata))
const = [
c
for c in m2.tables["t"].constraints
if isinstance(c, UniqueConstraint)
][0]
is_true(const.columns[0].compare(t.c.q))
def test_pickle_idx_expr_annotated_col(self, no_pickle_annotated):
t, q_col = no_pickle_annotated
expr = q_col > 5
t.append_constraint(Index("conditional_index", expr))
m2 = pickle.loads(pickle.dumps(t.metadata))
const = list(m2.tables["t"].indexes)[0]
is_true(const.expressions[0].compare(expr))
def test_pickle_ck_binary_annotated_col(self, no_pickle_annotated):
t, q_col = no_pickle_annotated
ck = CheckConstraint(q_col > 5)
t.append_constraint(ck)
m2 = pickle.loads(pickle.dumps(t.metadata))
const = [
c
for c in m2.tables["t"].constraints
if isinstance(c, CheckConstraint)
][0]
is_true(const.sqltext.compare(ck.sqltext))
class ColumnDefinitionTest(AssertsCompiledSQL, fixtures.TestBase):
    """Test Column() construction."""
    __dialect__ = "default"
    def columns(self):
        # a spread of Column() signatures: name positional, name deferred,
        # name as keyword, type as class vs. instance vs. type_= keyword
        return [
            Column(Integer),
            Column("b", Integer),
            Column(Integer),
            Column("d", Integer),
            Column(Integer, name="e"),
            Column(type_=Integer),
            Column(Integer()),
            Column("h", Integer()),
            Column(type_=Integer()),
        ]
    def test_basic(self):
        """Name-less columns can be named after construction and then
        attached to a Table."""
        c = self.columns()
        for i, v in ((0, "a"), (2, "c"), (5, "f"), (6, "g"), (8, "i")):
            c[i].name = v
            c[i].key = v
        del i, v
        tbl = Table("table", MetaData(), *c)
        for i, col in enumerate(tbl.c):
            assert col.name == c[i].name
    def test_name_none(self):
        """A Column with no name cannot be attached to a Table."""
        c = Column(Integer)
        assert_raises_message(
            exc.ArgumentError,
            "Column must be constructed with a non-blank name or assign a "
            "non-blank .name ",
            Table,
            "t",
            MetaData(),
            c,
        )
    def test_name_blank(self):
        """An empty-string name is rejected the same as no name."""
        c = Column("", Integer)
        assert_raises_message(
            exc.ArgumentError,
            "Column must be constructed with a non-blank name or assign a "
            "non-blank .name ",
            Table,
            "t",
            MetaData(),
            c,
        )
    def test_no_shared_column_schema(self):
        """A schema Column may be assigned to only one Table."""
        c = Column("x", Integer)
        Table("t", MetaData(), c)
        assert_raises_message(
            exc.ArgumentError,
            "Column object 'x' already assigned to Table 't'",
            Table,
            "q",
            MetaData(),
            c,
        )
    def test_no_shared_column_sql(self):
        """Same single-parent rule for lightweight sql.column()."""
        c = column("x", Integer)
        table("t", c)
        assert_raises_message(
            exc.ArgumentError,
            "column object 'x' already assigned to table 't'",
            table,
            "q",
            c,
        )
    def test_incomplete_key(self):
        """.key defaults to .name when name is assigned before attach."""
        c = Column(Integer)
        assert c.name is None
        assert c.key is None
        c.name = "named"
        Table("t", MetaData(), c)
        assert c.name == "named"
        assert c.name == c.key
    def test_unique_index_flags_default_to_none(self):
        """unique/index flags default to None; index=True creates an
        Index on Table attach whose .unique mirrors the unique flag."""
        c = Column(Integer)
        eq_(c.unique, None)
        eq_(c.index, None)
        c = Column("c", Integer, index=True)
        eq_(c.unique, None)
        eq_(c.index, True)
        t = Table("t", MetaData(), c)
        eq_(list(t.indexes)[0].unique, False)
        c = Column(Integer, unique=True)
        eq_(c.unique, True)
        eq_(c.index, None)
        c = Column("c", Integer, index=True, unique=True)
        eq_(c.unique, True)
        eq_(c.index, True)
        t = Table("t", MetaData(), c)
        eq_(list(t.indexes)[0].unique, True)
    def test_bogus(self):
        """Conflicting name / type arguments raise ArgumentError."""
        assert_raises(exc.ArgumentError, Column, "foo", name="bar")
        assert_raises(
            exc.ArgumentError, Column, "foo", Integer, type_=Integer()
        )
    def test_custom_subclass_proxy(self):
        """test proxy generation of a Column subclass, can be compiled."""
        from sqlalchemy.schema import Column
        from sqlalchemy.ext.compiler import compiles
        from sqlalchemy.sql import select
        class MyColumn(Column):
            def _constructor(self, name, type_, **kw):
                # _constructor is what allows select()/alias() to
                # produce proxy copies of this subclass
                kw["name"] = name
                return MyColumn(type_, **kw)
            def __init__(self, type_, **kw):
                Column.__init__(self, type_, **kw)
            def my_goofy_thing(self):
                return "hi"
        @compiles(MyColumn)
        def goofy(element, compiler, **kw):
            s = compiler.visit_column(element, **kw)
            return s + "-"
        id_ = MyColumn(Integer, primary_key=True)
        id_.name = "id"
        name = MyColumn(String)
        name.name = "name"
        t1 = Table("foo", MetaData(), id_, name)
        # goofy thing
        eq_(t1.c.name.my_goofy_thing(), "hi")
        # create proxy
        s = select(t1.select().alias())
        # proxy has goofy thing
        eq_(s.subquery().c.name.my_goofy_thing(), "hi")
        # compile works
        self.assert_compile(
            select(t1.select().alias()),
            "SELECT anon_1.id-, anon_1.name- FROM "
            "(SELECT foo.id- AS id, foo.name- AS name "
            "FROM foo) AS anon_1",
        )
    def test_custom_subclass_proxy_typeerror(self):
        """A Column subclass without _constructor() fails proxying with
        an informative TypeError."""
        from sqlalchemy.schema import Column
        from sqlalchemy.sql import select
        class MyColumn(Column):
            def __init__(self, type_, **kw):
                Column.__init__(self, type_, **kw)
        id_ = MyColumn(Integer, primary_key=True)
        id_.name = "id"
        name = MyColumn(String)
        name.name = "name"
        t1 = Table("foo", MetaData(), id_, name)
        assert_raises_message(
            TypeError,
            "Could not create a copy of this <class "
            "'test.sql.test_metadata..*MyColumn'> "
            "object. Ensure the class includes a _constructor()",
            getattr,
            select(t1.select().alias()).subquery(),
            "c",
        )
    def test_custom_create(self):
        """CreateColumn DDL can be replaced wholesale via @compiles."""
        from sqlalchemy.ext.compiler import compiles, deregister
        @compiles(schema.CreateColumn)
        def compile_(element, compiler, **kw):
            column = element.element
            if "special" not in column.info:
                # ordinary columns fall through to the default DDL
                return compiler.visit_create_column(element, **kw)
            text = "%s SPECIAL DIRECTIVE %s" % (
                column.name,
                compiler.type_compiler.process(column.type),
            )
            default = compiler.get_column_default_string(column)
            if default is not None:
                text += " DEFAULT " + default
            if not column.nullable:
                text += " NOT NULL"
            if column.constraints:
                text += " ".join(
                    compiler.process(const) for const in column.constraints
                )
            return text
        t = Table(
            "mytable",
            MetaData(),
            Column("x", Integer, info={"special": True}, primary_key=True),
            Column("y", String(50)),
            Column("z", String(20), info={"special": True}),
        )
        self.assert_compile(
            schema.CreateTable(t),
            "CREATE TABLE mytable (x SPECIAL DIRECTIVE INTEGER "
            "NOT NULL, y VARCHAR(50), "
            "z SPECIAL DIRECTIVE VARCHAR(20), PRIMARY KEY (x))",
        )
        # restore default CreateColumn compilation for subsequent tests
        deregister(schema.CreateColumn)
class ColumnDefaultsTest(fixtures.TestBase):
    """test assignment of default fixtures to columns"""
    def _fixture(self, *arg, **kw):
        # every test operates on a single Integer column "x"
        return Column("x", Integer, *arg, **kw)
    def test_server_default_positional(self):
        target = schema.DefaultClause("y")
        c = self._fixture(target)
        assert c.server_default is target
        assert target.column is c
    def test_onupdate_default_not_server_default_one(self):
        """server_default / server_onupdate keywords remain distinct."""
        target1 = schema.DefaultClause("y")
        target2 = schema.DefaultClause("z")
        c = self._fixture(server_default=target1, server_onupdate=target2)
        eq_(c.server_default.arg, "y")
        eq_(c.server_onupdate.arg, "z")
    def test_onupdate_default_not_server_default_two(self):
        """...even when both clauses carry for_update=True."""
        target1 = schema.DefaultClause("y", for_update=True)
        target2 = schema.DefaultClause("z", for_update=True)
        c = self._fixture(server_default=target1, server_onupdate=target2)
        eq_(c.server_default.arg, "y")
        eq_(c.server_onupdate.arg, "z")
    def test_onupdate_default_not_server_default_three(self):
        """Positionally, the for_update flag routes each clause."""
        target1 = schema.DefaultClause("y", for_update=False)
        target2 = schema.DefaultClause("z", for_update=True)
        c = self._fixture(target1, target2)
        eq_(c.server_default.arg, "y")
        eq_(c.server_onupdate.arg, "z")
    def test_onupdate_default_not_server_default_four(self):
        """server_onupdate alone leaves server_default unset."""
        target1 = schema.DefaultClause("y", for_update=False)
        c = self._fixture(server_onupdate=target1)
        is_(c.server_default, None)
        eq_(c.server_onupdate.arg, "y")
    def test_server_default_keyword_as_schemaitem(self):
        target = schema.DefaultClause("y")
        c = self._fixture(server_default=target)
        assert c.server_default is target
        assert target.column is c
    def test_server_default_keyword_as_clause(self):
        # a plain string is coerced into a DefaultClause
        target = "y"
        c = self._fixture(server_default=target)
        assert c.server_default.arg == target
        assert c.server_default.column is c
    def test_server_default_onupdate_positional(self):
        target = schema.DefaultClause("y", for_update=True)
        c = self._fixture(target)
        assert c.server_onupdate is target
        assert target.column is c
    def test_server_default_onupdate_keyword_as_schemaitem(self):
        target = schema.DefaultClause("y", for_update=True)
        c = self._fixture(server_onupdate=target)
        assert c.server_onupdate is target
        assert target.column is c
    def test_server_default_onupdate_keyword_as_clause(self):
        target = "y"
        c = self._fixture(server_onupdate=target)
        assert c.server_onupdate.arg == target
        assert c.server_onupdate.column is c
    def test_column_default_positional(self):
        target = schema.ColumnDefault("y")
        c = self._fixture(target)
        assert c.default is target
        assert target.column is c
    def test_column_default_keyword_as_schemaitem(self):
        target = schema.ColumnDefault("y")
        c = self._fixture(default=target)
        assert c.default is target
        assert target.column is c
    def test_column_default_keyword_as_clause(self):
        target = "y"
        c = self._fixture(default=target)
        assert c.default.arg == target
        assert c.default.column is c
    def test_column_default_onupdate_positional(self):
        target = schema.ColumnDefault("y", for_update=True)
        c = self._fixture(target)
        assert c.onupdate is target
        assert target.column is c
    def test_column_default_onupdate_keyword_as_schemaitem(self):
        target = schema.ColumnDefault("y", for_update=True)
        c = self._fixture(onupdate=target)
        assert c.onupdate is target
        assert target.column is c
    def test_column_default_onupdate_keyword_as_clause(self):
        target = "y"
        c = self._fixture(onupdate=target)
        assert c.onupdate.arg == target
        assert c.onupdate.column is c
class ColumnOptionsTest(fixtures.TestBase):
    """Tests for default generators, NULL-typed columns, Column
    argument-signature combinations, and the Column.info dictionary."""
    def test_default_generators(self):
        g1, g2 = Sequence("foo_id_seq"), ColumnDefault("f5")
        assert Column(String, default=g1).default is g1
        assert Column(String, onupdate=g1).onupdate is g1
        assert Column(String, default=g2).default is g2
        assert Column(String, onupdate=g2).onupdate is g2
    def _null_type_no_error(self, col):
        # a type-less column still renders DDL, using the NULL type
        c_str = str(schema.CreateColumn(col).compile())
        assert "NULL" in c_str
    def _no_name_error(self, col):
        # attaching a nameless column to a Table must raise
        assert_raises_message(
            exc.ArgumentError,
            "Column must be constructed with a non-blank name or "
            "assign a non-blank .name",
            Table,
            "t",
            MetaData(),
            col,
        )
    def _no_error(self, col):
        # full round trip: attach and compile CREATE TABLE without error
        m = MetaData()
        Table("bar", m, Column("id", Integer))
        t = Table("t", m, col)
        schema.CreateTable(t).compile()
    def test_argument_signatures(self):
        """A name is mandatory; the type may be omitted in every
        combination of default / Sequence / ForeignKey arguments."""
        self._no_name_error(Column())
        self._null_type_no_error(Column("foo"))
        self._no_name_error(Column(default="foo"))
        self._no_name_error(Column(Sequence("a")))
        self._null_type_no_error(Column("foo", default="foo"))
        self._null_type_no_error(Column("foo", Sequence("a")))
        self._no_name_error(Column(ForeignKey("bar.id")))
        self._no_error(Column("foo", ForeignKey("bar.id")))
        self._no_name_error(Column(ForeignKey("bar.id"), default="foo"))
        self._no_name_error(Column(ForeignKey("bar.id"), Sequence("a")))
        self._no_error(Column("foo", ForeignKey("bar.id"), default="foo"))
        self._no_error(Column("foo", ForeignKey("bar.id"), Sequence("a")))
    def test_column_info(self):
        """.info defaults to an empty, per-column mutable dict."""
        c1 = Column("foo", String, info={"x": "y"})
        c2 = Column("bar", String, info={})
        c3 = Column("bat", String)
        assert c1.info == {"x": "y"}
        assert c2.info == {}
        assert c3.info == {}
        for c in (c1, c2, c3):
            c.info["bar"] = "zip"
            assert c.info["bar"] == "zip"
class CatchAllEventsTest(fixtures.RemovesEvents, fixtures.TestBase):
    """before/after parent-attach event listeners observe every schema
    item attachment in a fixed, documented order."""
    def test_all_events(self):
        canary = []
        def before_attach(obj, parent):
            canary.append(
                "%s->%s" % (obj.__class__.__name__, parent.__class__.__name__)
            )
        def after_attach(obj, parent):
            # after the attach, the parent is rendered via %s (str()),
            # e.g. the table/column name or "MetaData()"
            canary.append("%s->%s" % (obj.__class__.__name__, parent))
        self.event_listen(
            schema.SchemaItem, "before_parent_attach", before_attach
        )
        self.event_listen(
            schema.SchemaItem, "after_parent_attach", after_attach
        )
        m = MetaData()
        Table(
            "t1",
            m,
            Column("id", Integer, Sequence("foo_id"), primary_key=True),
            Column("bar", String, ForeignKey("t2.id")),
        )
        Table("t2", m, Column("id", Integer, primary_key=True))
        eq_(
            canary,
            [
                "Sequence->Column",
                "Sequence->id",
                "ForeignKey->Column",
                "ForeignKey->bar",
                "Table->MetaData",
                "PrimaryKeyConstraint->Table",
                "PrimaryKeyConstraint->t1",
                "Column->Table",
                "Column->t1",
                "Column->Table",
                "Column->t1",
                "ForeignKeyConstraint->Table",
                "ForeignKeyConstraint->t1",
                "Table->MetaData()",
                "Table->MetaData",
                "PrimaryKeyConstraint->Table",
                "PrimaryKeyConstraint->t2",
                "Column->Table",
                "Column->t2",
                "Table->MetaData()",
            ],
        )
    def test_events_per_constraint(self):
        """Listeners registered per constraint class fire for implicit
        (index=True, unique=True, primary_key) and explicit constraints."""
        canary = []
        def evt(target):
            # register before/after listeners for one constraint class
            def before_attach(obj, parent):
                canary.append(
                    "%s->%s" % (target.__name__, parent.__class__.__name__)
                )
            def after_attach(obj, parent):
                assert hasattr(obj, "name") # so we can change it
                canary.append("%s->%s" % (target.__name__, parent))
            self.event_listen(target, "before_parent_attach", before_attach)
            self.event_listen(target, "after_parent_attach", after_attach)
        for target in [
            schema.ForeignKeyConstraint,
            schema.PrimaryKeyConstraint,
            schema.UniqueConstraint,
            schema.CheckConstraint,
            schema.Index,
        ]:
            evt(target)
        m = MetaData()
        Table(
            "t1",
            m,
            Column("id", Integer, Sequence("foo_id"), primary_key=True),
            Column("bar", String, ForeignKey("t2.id"), index=True),
            Column("bat", Integer, unique=True),
        )
        Table(
            "t2",
            m,
            Column("id", Integer, primary_key=True),
            Column("bar", Integer),
            Column("bat", Integer),
            CheckConstraint("bar>5"),
            UniqueConstraint("bar", "bat"),
            Index(None, "bar", "bat"),
        )
        eq_(
            canary,
            [
                "PrimaryKeyConstraint->Table",
                "PrimaryKeyConstraint->t1",
                "Index->Table",
                "Index->t1",
                "ForeignKeyConstraint->Table",
                "ForeignKeyConstraint->t1",
                "UniqueConstraint->Table",
                "UniqueConstraint->t1",
                "PrimaryKeyConstraint->Table",
                "PrimaryKeyConstraint->t2",
                "CheckConstraint->Table",
                "CheckConstraint->t2",
                "UniqueConstraint->Table",
                "UniqueConstraint->t2",
                "Index->Table",
                "Index->t2",
            ],
        )
class DialectKWArgTest(fixtures.TestBase):
@contextmanager
def _fixture(self):
from sqlalchemy.engine.default import DefaultDialect
class ParticipatingDialect(DefaultDialect):
construct_arguments = [
(schema.Index, {"x": 5, "y": False, "z_one": None}),
(schema.ForeignKeyConstraint, {"foobar": False}),
]
class ParticipatingDialect2(DefaultDialect):
construct_arguments = [
(schema.Index, {"x": 9, "y": True, "pp": "default"}),
(schema.Table, {"*": None}),
]
class NonParticipatingDialect(DefaultDialect):
construct_arguments = None
def load(dialect_name):
if dialect_name == "participating":
return ParticipatingDialect
elif dialect_name == "participating2":
return ParticipatingDialect2
elif dialect_name == "nonparticipating":
return NonParticipatingDialect
else:
raise exc.NoSuchModuleError("no dialect %r" % dialect_name)
with mock.patch("sqlalchemy.dialects.registry.load", load):
yield
def teardown_test(self):
Index._kw_registry.clear()
def test_participating(self):
with self._fixture():
idx = Index("a", "b", "c", participating_y=True)
eq_(
idx.dialect_options,
{"participating": {"x": 5, "y": True, "z_one": None}},
)
eq_(idx.dialect_kwargs, {"participating_y": True})
def test_nonparticipating(self):
with self._fixture():
idx = Index(
"a", "b", "c", nonparticipating_y=True, nonparticipating_q=5
)
eq_(
idx.dialect_kwargs,
{"nonparticipating_y": True, "nonparticipating_q": 5},
)
def test_bad_kwarg_raise(self):
with self._fixture():
assert_raises_message(
TypeError,
"Additional arguments should be named "
"<dialectname>_<argument>, got 'foobar'",
Index,
"a",
"b",
"c",
foobar=True,
)
def test_unknown_dialect_warning(self):
with self._fixture():
with testing.expect_warnings(
"Can't validate argument 'unknown_y'; can't locate "
"any SQLAlchemy dialect named 'unknown'",
):
Index("a", "b", "c", unknown_y=True)
def test_participating_bad_kw(self):
with self._fixture():
assert_raises_message(
exc.ArgumentError,
"Argument 'participating_q_p_x' is not accepted by dialect "
"'participating' on behalf of "
"<class 'sqlalchemy.sql.schema.Index'>",
Index,
"a",
"b",
"c",
participating_q_p_x=8,
)
def test_participating_unknown_schema_item(self):
with self._fixture():
# the dialect doesn't include UniqueConstraint in
# its registry at all.
assert_raises_message(
exc.ArgumentError,
"Argument 'participating_q_p_x' is not accepted by dialect "
"'participating' on behalf of "
"<class 'sqlalchemy.sql.schema.UniqueConstraint'>",
UniqueConstraint,
"a",
"b",
participating_q_p_x=8,
)
@testing.emits_warning("Can't validate")
def test_unknown_dialect_warning_still_populates(self):
with self._fixture():
idx = Index("a", "b", "c", unknown_y=True)
eq_(idx.dialect_kwargs, {"unknown_y": True}) # still populates
@testing.emits_warning("Can't validate")
def test_unknown_dialect_warning_still_populates_multiple(self):
with self._fixture():
idx = Index(
"a",
"b",
"c",
unknown_y=True,
unknown_z=5,
otherunknown_foo="bar",
participating_y=8,
)
eq_(
idx.dialect_options,
{
"unknown": {"y": True, "z": 5, "*": None},
"otherunknown": {"foo": "bar", "*": None},
"participating": {"x": 5, "y": 8, "z_one": None},
},
)
eq_(
idx.dialect_kwargs,
{
"unknown_z": 5,
"participating_y": 8,
"unknown_y": True,
"otherunknown_foo": "bar",
},
) # still populates
def test_combined(self):
with self._fixture():
idx = Index(
"a", "b", "c", participating_x=7, nonparticipating_y=True
)
eq_(
idx.dialect_options,
{
"participating": {"y": False, "x": 7, "z_one": None},
"nonparticipating": {"y": True, "*": None},
},
)
eq_(
idx.dialect_kwargs,
{"participating_x": 7, "nonparticipating_y": True},
)
def test_multiple_participating(self):
with self._fixture():
idx = Index(
"a",
"b",
"c",
participating_x=7,
participating2_x=15,
participating2_y="lazy",
)
eq_(
idx.dialect_options,
{
"participating": {"x": 7, "y": False, "z_one": None},
"participating2": {"x": 15, "y": "lazy", "pp": "default"},
},
)
eq_(
idx.dialect_kwargs,
{
"participating_x": 7,
"participating2_x": 15,
"participating2_y": "lazy",
},
)
def test_foreign_key_propagate(self):
with self._fixture():
m = MetaData()
fk = ForeignKey("t2.id", participating_foobar=True)
t = Table("t", m, Column("id", Integer, fk))
fkc = [
c for c in t.constraints if isinstance(c, ForeignKeyConstraint)
][0]
eq_(fkc.dialect_kwargs, {"participating_foobar": True})
def test_foreign_key_propagate_exceptions_delayed(self):
with self._fixture():
m = MetaData()
fk = ForeignKey("t2.id", participating_fake=True)
c1 = Column("id", Integer, fk)
assert_raises_message(
exc.ArgumentError,
"Argument 'participating_fake' is not accepted by "
"dialect 'participating' on behalf of "
"<class 'sqlalchemy.sql.schema.ForeignKeyConstraint'>",
Table,
"t",
m,
c1,
)
def test_wildcard(self):
with self._fixture():
m = MetaData()
t = Table(
"x",
m,
Column("x", Integer),
participating2_xyz="foo",
participating2_engine="InnoDB",
)
eq_(
t.dialect_kwargs,
{
"participating2_xyz": "foo",
"participating2_engine": "InnoDB",
},
)
def test_uninit_wildcard(self):
with self._fixture():
m = MetaData()
t = Table("x", m, Column("x", Integer))
eq_(t.dialect_options["participating2"], {"*": None})
eq_(t.dialect_kwargs, {})
def test_not_contains_wildcard(self):
with self._fixture():
m = MetaData()
t = Table("x", m, Column("x", Integer))
assert "foobar" not in t.dialect_options["participating2"]
def test_contains_wildcard(self):
with self._fixture():
m = MetaData()
t = Table("x", m, Column("x", Integer), participating2_foobar=5)
assert "foobar" in t.dialect_options["participating2"]
def test_update(self):
with self._fixture():
idx = Index("a", "b", "c", participating_x=20)
eq_(idx.dialect_kwargs, {"participating_x": 20})
idx._validate_dialect_kwargs(
{"participating_x": 25, "participating_z_one": "default"}
)
eq_(
idx.dialect_options,
{"participating": {"x": 25, "y": False, "z_one": "default"}},
)
eq_(
idx.dialect_kwargs,
{"participating_x": 25, "participating_z_one": "default"},
)
idx._validate_dialect_kwargs(
{"participating_x": 25, "participating_z_one": "default"}
)
eq_(
idx.dialect_options,
{"participating": {"x": 25, "y": False, "z_one": "default"}},
)
eq_(
idx.dialect_kwargs,
{"participating_x": 25, "participating_z_one": "default"},
)
idx._validate_dialect_kwargs(
{"participating_y": True, "participating2_y": "p2y"}
)
eq_(
idx.dialect_options,
{
"participating": {"x": 25, "y": True, "z_one": "default"},
"participating2": {"y": "p2y", "pp": "default", "x": 9},
},
)
eq_(
idx.dialect_kwargs,
{
"participating_x": 25,
"participating_y": True,
"participating2_y": "p2y",
"participating_z_one": "default",
},
)
def test_key_error_kwargs_no_dialect(self):
with self._fixture():
idx = Index("a", "b", "c")
assert_raises(KeyError, idx.kwargs.__getitem__, "foo_bar")
def test_key_error_kwargs_no_underscore(self):
with self._fixture():
idx = Index("a", "b", "c")
assert_raises(KeyError, idx.kwargs.__getitem__, "foobar")
def test_key_error_kwargs_no_argument(self):
with self._fixture():
idx = Index("a", "b", "c")
assert_raises(
KeyError, idx.kwargs.__getitem__, "participating_asdmfq34098"
)
assert_raises(
KeyError,
idx.kwargs.__getitem__,
"nonparticipating_asdmfq34098",
)
def test_key_error_dialect_options(self):
with self._fixture():
idx = Index("a", "b", "c")
assert_raises(
KeyError,
idx.dialect_options["participating"].__getitem__,
"asdfaso890",
)
assert_raises(
KeyError,
idx.dialect_options["nonparticipating"].__getitem__,
"asdfaso890",
)
def test_ad_hoc_participating_via_opt(self):
with self._fixture():
idx = Index("a", "b", "c")
idx.dialect_options["participating"]["foobar"] = 5
eq_(idx.dialect_options["participating"]["foobar"], 5)
eq_(idx.kwargs["participating_foobar"], 5)
def test_ad_hoc_nonparticipating_via_opt(self):
with self._fixture():
idx = Index("a", "b", "c")
idx.dialect_options["nonparticipating"]["foobar"] = 5
eq_(idx.dialect_options["nonparticipating"]["foobar"], 5)
eq_(idx.kwargs["nonparticipating_foobar"], 5)
def test_ad_hoc_participating_via_kwargs(self):
with self._fixture():
idx = Index("a", "b", "c")
idx.kwargs["participating_foobar"] = 5
eq_(idx.dialect_options["participating"]["foobar"], 5)
eq_(idx.kwargs["participating_foobar"], 5)
def test_ad_hoc_nonparticipating_via_kwargs(self):
with self._fixture():
idx = Index("a", "b", "c")
idx.kwargs["nonparticipating_foobar"] = 5
eq_(idx.dialect_options["nonparticipating"]["foobar"], 5)
eq_(idx.kwargs["nonparticipating_foobar"], 5)
def test_ad_hoc_via_kwargs_invalid_key(self):
with self._fixture():
idx = Index("a", "b", "c")
assert_raises_message(
exc.ArgumentError,
"Keys must be of the form <dialectname>_<argname>",
idx.kwargs.__setitem__,
"foobar",
5,
)
def test_ad_hoc_via_kwargs_invalid_dialect(self):
with self._fixture():
idx = Index("a", "b", "c")
assert_raises_message(
exc.ArgumentError,
"no dialect 'nonexistent'",
idx.kwargs.__setitem__,
"nonexistent_foobar",
5,
)
    def test_add_new_arguments_participating(self):
        """``argument_for`` registers a new argument with a default that
        applies to constructs which don't pass it explicitly."""
        with self._fixture():
            Index.argument_for("participating", "xyzqpr", False)

            idx = Index("a", "b", "c", participating_xyzqpr=True)
            eq_(idx.kwargs["participating_xyzqpr"], True)

            idx = Index("a", "b", "c")
            eq_(idx.dialect_options["participating"]["xyzqpr"], False)
    def test_add_new_arguments_participating_no_existing(self):
        """``argument_for`` works on a construct type with no pre-declared
        arguments for that dialect."""
        with self._fixture():
            PrimaryKeyConstraint.argument_for("participating", "xyzqpr", False)

            pk = PrimaryKeyConstraint("a", "b", "c", participating_xyzqpr=True)
            eq_(pk.kwargs["participating_xyzqpr"], True)

            pk = PrimaryKeyConstraint("a", "b", "c")
            eq_(pk.dialect_options["participating"]["xyzqpr"], False)
    def test_add_new_arguments_nonparticipating(self):
        """``argument_for`` is refused for dialects that don't opt in to
        keyword-argument validation."""
        with self._fixture():
            assert_raises_message(
                exc.ArgumentError,
                "Dialect 'nonparticipating' does have keyword-argument "
                "validation and defaults enabled configured",
                Index.argument_for,
                "nonparticipating",
                "xyzqpr",
                False,
            )
    def test_add_new_arguments_invalid_dialect(self):
        """``argument_for`` is refused for a dialect that cannot be loaded."""
        with self._fixture():
            assert_raises_message(
                exc.ArgumentError,
                "no dialect 'nonexistent'",
                Index.argument_for,
                "nonexistent",
                "foobar",
                5,
            )
class NamingConventionTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for ``MetaData(naming_convention=...)``: generated names for
    PK/UQ/FK/CK constraints and indexes, token expansion
    (``%(column_0_name)s`` etc.), truncation at ``max_identifier_length``,
    and interaction with explicitly named / ``naming.conv`` constraints."""

    __dialect__ = "default"

    def _fixture(self, naming_convention, table_schema=None):
        """Return a 'user' table on a MetaData with the given convention.

        Includes columns whose name and key differ (Data2/data2, Data3/data3)
        so name-vs-key token behavior can be asserted.
        """
        m1 = MetaData(naming_convention=naming_convention)

        u1 = Table(
            "user",
            m1,
            Column("id", Integer, primary_key=True),
            Column("version", Integer, primary_key=True),
            Column("data", String(30)),
            Column("Data2", String(30), key="data2"),
            Column("Data3", String(30), key="data3"),
            schema=table_schema,
        )
        return u1

    def _colliding_name_fixture(self, naming_convention, id_flags):
        # "foo_id" collides with the label generated for column "id"
        m1 = MetaData(naming_convention=naming_convention)

        t1 = Table(
            "foo",
            m1,
            Column("id", Integer, **id_flags),
            Column("foo_id", Integer),
        )
        return t1

    def test_colliding_col_label_from_index_flag(self):
        t1 = self._colliding_name_fixture(
            {"ix": "ix_%(column_0_label)s"}, {"index": True}
        )

        idx = list(t1.indexes)[0]

        # name is generated up front.  alembic really prefers this
        eq_(idx.name, "ix_foo_id")
        self.assert_compile(
            CreateIndex(idx), "CREATE INDEX ix_foo_id ON foo (id)"
        )

    def test_colliding_col_label_from_unique_flag(self):
        t1 = self._colliding_name_fixture(
            {"uq": "uq_%(column_0_label)s"}, {"unique": True}
        )

        const = [c for c in t1.constraints if isinstance(c, UniqueConstraint)]

        uq = const[0]

        # name is generated up front.  alembic really prefers this
        eq_(uq.name, "uq_foo_id")

        self.assert_compile(
            AddConstraint(uq),
            "ALTER TABLE foo ADD CONSTRAINT uq_foo_id UNIQUE (id)",
        )

    def test_colliding_col_label_from_index_obj(self):
        t1 = self._colliding_name_fixture({"ix": "ix_%(column_0_label)s"}, {})

        idx = Index(None, t1.c.id)
        is_(idx, list(t1.indexes)[0])
        eq_(idx.name, "ix_foo_id")
        self.assert_compile(
            CreateIndex(idx), "CREATE INDEX ix_foo_id ON foo (id)"
        )

    def test_colliding_col_label_from_unique_obj(self):
        t1 = self._colliding_name_fixture({"uq": "uq_%(column_0_label)s"}, {})
        uq = UniqueConstraint(t1.c.id)
        const = [c for c in t1.constraints if isinstance(c, UniqueConstraint)]
        is_(const[0], uq)
        eq_(const[0].name, "uq_foo_id")
        self.assert_compile(
            AddConstraint(const[0]),
            "ALTER TABLE foo ADD CONSTRAINT uq_foo_id UNIQUE (id)",
        )

    def test_colliding_col_label_from_index_flag_no_conv(self):
        t1 = self._colliding_name_fixture({"ck": "foo"}, {"index": True})

        idx = list(t1.indexes)[0]

        # this behavior needs to fail, as of #4911 since we are testing it,
        # ensure it raises a CompileError.  In #4289 we may want to revisit
        # this in some way, most likely specifically to Postgresql only.
        assert_raises_message(
            exc.CompileError,
            "CREATE INDEX requires that the index have a name",
            CreateIndex(idx).compile,
        )

        assert_raises_message(
            exc.CompileError,
            "DROP INDEX requires that the index have a name",
            DropIndex(idx).compile,
        )

    def test_colliding_col_label_from_unique_flag_no_conv(self):
        t1 = self._colliding_name_fixture({"ck": "foo"}, {"unique": True})

        const = [c for c in t1.constraints if isinstance(c, UniqueConstraint)]
        is_(const[0].name, None)
        self.assert_compile(
            AddConstraint(const[0]), "ALTER TABLE foo ADD UNIQUE (id)"
        )

    @testing.combinations(
        ("nopk",),
        ("column",),
        ("constraint",),
        ("explicit_name",),
        argnames="pktype",
    )
    @testing.combinations(
        ("pk_%(table_name)s", "pk_t1"),
        ("pk_%(column_0_name)s", "pk_x"),
        ("pk_%(column_0_N_name)s", "pk_x_y"),
        ("pk_%(column_0_N_label)s", "pk_t1_x_t1_y"),
        ("%(column_0_name)s", "x"),
        ("%(column_0N_name)s", "xy"),
        argnames="conv, expected_name",
    )
    def test_pk_conventions(self, conv, expected_name, pktype):
        """PK naming tokens across all the ways a PK can be established."""
        m1 = MetaData(naming_convention={"pk": conv})

        if pktype == "column":
            t1 = Table(
                "t1",
                m1,
                Column("x", Integer, primary_key=True),
                Column("y", Integer, primary_key=True),
            )
        elif pktype == "constraint":
            t1 = Table(
                "t1",
                m1,
                Column("x", Integer),
                Column("y", Integer),
                PrimaryKeyConstraint("x", "y"),
            )
        elif pktype == "nopk":
            t1 = Table(
                "t1",
                m1,
                Column("x", Integer, nullable=False),
                Column("y", Integer, nullable=False),
            )
            expected_name = None
        elif pktype == "explicit_name":
            t1 = Table(
                "t1",
                m1,
                Column("x", Integer, primary_key=True),
                Column("y", Integer, primary_key=True),
                PrimaryKeyConstraint("x", "y", name="myname"),
            )
            expected_name = "myname"

        if expected_name:
            eq_(t1.primary_key.name, expected_name)

        if pktype == "nopk":
            self.assert_compile(
                schema.CreateTable(t1),
                "CREATE TABLE t1 (x INTEGER NOT NULL, y INTEGER NOT NULL)",
            )
        else:
            self.assert_compile(
                schema.CreateTable(t1),
                "CREATE TABLE t1 (x INTEGER NOT NULL, y INTEGER NOT NULL, "
                "CONSTRAINT %s PRIMARY KEY (x, y))" % expected_name,
            )

    def test_uq_name(self):
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0_name)s"}
        )
        uq = UniqueConstraint(u1.c.data)
        eq_(uq.name, "uq_user_data")

    def test_uq_conv_name(self):
        # an explicit naming.conv() name wins over the convention
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0_name)s"}
        )
        uq = UniqueConstraint(u1.c.data, name=naming.conv("myname"))
        self.assert_compile(
            schema.AddConstraint(uq),
            'ALTER TABLE "user" ADD CONSTRAINT myname UNIQUE (data)',
            dialect="default",
        )

    def test_uq_defer_name_convention(self):
        # _NONE_NAME defers naming until DDL compile time
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0_name)s"}
        )
        uq = UniqueConstraint(u1.c.data, name=naming._NONE_NAME)
        self.assert_compile(
            schema.AddConstraint(uq),
            'ALTER TABLE "user" ADD CONSTRAINT uq_user_data UNIQUE (data)',
            dialect="default",
        )

    def test_uq_key(self):
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0_key)s"}
        )
        uq = UniqueConstraint(u1.c.data, u1.c.data2)
        eq_(uq.name, "uq_user_data")

    def test_uq_label(self):
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0_label)s"}
        )
        uq = UniqueConstraint(u1.c.data, u1.c.data2)
        eq_(uq.name, "uq_user_user_data")

    def test_uq_allcols_underscore_name(self):
        # %(column_0_N_name)s joins all column names with underscores
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"}
        )
        uq = UniqueConstraint(u1.c.data, u1.c.data2, u1.c.data3)
        eq_(uq.name, "uq_user_data_Data2_Data3")

    def test_uq_allcols_merged_name(self):
        # %(column_0N_name)s concatenates all column names directly
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0N_name)s"}
        )
        uq = UniqueConstraint(u1.c.data, u1.c.data2, u1.c.data3)
        eq_(uq.name, "uq_user_dataData2Data3")

    def test_uq_allcols_merged_key(self):
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0N_key)s"}
        )
        uq = UniqueConstraint(u1.c.data, u1.c.data2, u1.c.data3)
        eq_(uq.name, "uq_user_datadata2data3")

    def test_uq_allcols_truncated_name(self):
        # over max_identifier_length, the name gets a deterministic hash suffix
        u1 = self._fixture(
            naming_convention={"uq": "uq_%(table_name)s_%(column_0N_name)s"}
        )
        uq = UniqueConstraint(u1.c.data, u1.c.data2, u1.c.data3)

        dialect = default.DefaultDialect()
        self.assert_compile(
            schema.AddConstraint(uq),
            'ALTER TABLE "user" ADD '
            'CONSTRAINT "uq_user_dataData2Data3" '
            'UNIQUE (data, "Data2", "Data3")',
            dialect=dialect,
        )

        dialect.max_identifier_length = 15

        self.assert_compile(
            schema.AddConstraint(uq),
            'ALTER TABLE "user" ADD '
            'CONSTRAINT uq_user_2769 UNIQUE (data, "Data2", "Data3")',
            dialect=dialect,
        )

    def test_fk_allcols_underscore_name(self):
        u1 = self._fixture(
            naming_convention={
                "fk": "fk_%(table_name)s_%(column_0_N_name)s_"
                "%(referred_table_name)s_%(referred_column_0_N_name)s"
            }
        )

        m1 = u1.metadata

        a1 = Table(
            "address",
            m1,
            Column("id", Integer, primary_key=True),
            Column("UserData", String(30), key="user_data"),
            Column("UserData2", String(30), key="user_data2"),
            Column("UserData3", String(30), key="user_data3"),
        )
        fk = ForeignKeyConstraint(
            ["user_data", "user_data2", "user_data3"],
            ["user.data", "user.data2", "user.data3"],
        )
        a1.append_constraint(fk)
        self.assert_compile(
            schema.AddConstraint(fk),
            "ALTER TABLE address ADD CONSTRAINT "
            '"fk_address_UserData_UserData2_UserData3_user_data_Data2_Data3" '
            'FOREIGN KEY("UserData", "UserData2", "UserData3") '
            'REFERENCES "user" (data, "Data2", "Data3")',
            dialect=default.DefaultDialect(),
        )

    def test_fk_allcols_merged_name(self):
        u1 = self._fixture(
            naming_convention={
                "fk": "fk_%(table_name)s_%(column_0N_name)s_"
                "%(referred_table_name)s_%(referred_column_0N_name)s"
            }
        )

        m1 = u1.metadata

        a1 = Table(
            "address",
            m1,
            Column("id", Integer, primary_key=True),
            Column("UserData", String(30), key="user_data"),
            Column("UserData2", String(30), key="user_data2"),
            Column("UserData3", String(30), key="user_data3"),
        )
        fk = ForeignKeyConstraint(
            ["user_data", "user_data2", "user_data3"],
            ["user.data", "user.data2", "user.data3"],
        )
        a1.append_constraint(fk)
        self.assert_compile(
            schema.AddConstraint(fk),
            "ALTER TABLE address ADD CONSTRAINT "
            '"fk_address_UserDataUserData2UserData3_user_dataData2Data3" '
            'FOREIGN KEY("UserData", "UserData2", "UserData3") '
            'REFERENCES "user" (data, "Data2", "Data3")',
            dialect=default.DefaultDialect(),
        )

    def test_fk_allcols_truncated_name(self):
        u1 = self._fixture(
            naming_convention={
                "fk": "fk_%(table_name)s_%(column_0N_name)s_"
                "%(referred_table_name)s_%(referred_column_0N_name)s"
            }
        )

        m1 = u1.metadata

        a1 = Table(
            "address",
            m1,
            Column("id", Integer, primary_key=True),
            Column("UserData", String(30), key="user_data"),
            Column("UserData2", String(30), key="user_data2"),
            Column("UserData3", String(30), key="user_data3"),
        )
        fk = ForeignKeyConstraint(
            ["user_data", "user_data2", "user_data3"],
            ["user.data", "user.data2", "user.data3"],
        )
        a1.append_constraint(fk)

        dialect = default.DefaultDialect()
        dialect.max_identifier_length = 15

        self.assert_compile(
            schema.AddConstraint(fk),
            "ALTER TABLE address ADD CONSTRAINT "
            "fk_addr_f9ff "
            'FOREIGN KEY("UserData", "UserData2", "UserData3") '
            'REFERENCES "user" (data, "Data2", "Data3")',
            dialect=dialect,
        )

    def test_ix_allcols_truncation(self):
        u1 = self._fixture(
            naming_convention={"ix": "ix_%(table_name)s_%(column_0N_name)s"}
        )
        ix = Index(None, u1.c.data, u1.c.data2, u1.c.data3)
        dialect = default.DefaultDialect()
        dialect.max_identifier_length = 15
        self.assert_compile(
            schema.CreateIndex(ix),
            "CREATE INDEX ix_user_2de9 ON " '"user" (data, "Data2", "Data3")',
            dialect=dialect,
        )

    def test_ix_name(self):
        u1 = self._fixture(
            naming_convention={"ix": "ix_%(table_name)s_%(column_0_name)s"}
        )
        ix = Index(None, u1.c.data)
        eq_(ix.name, "ix_user_data")

    def test_ck_name_required(self):
        # %(constraint_name)s demands an explicitly named constraint
        u1 = self._fixture(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )
        ck = CheckConstraint(u1.c.data == "x", name="mycheck")
        eq_(ck.name, "ck_user_mycheck")

        assert_raises_message(
            exc.InvalidRequestError,
            r"Naming convention including %\(constraint_name\)s token "
            "requires that constraint is explicitly named.",
            CheckConstraint,
            u1.c.data == "x",
        )

    def test_ck_name_deferred_required(self):
        u1 = self._fixture(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )
        ck = CheckConstraint(u1.c.data == "x", name=naming._NONE_NAME)

        assert_raises_message(
            exc.InvalidRequestError,
            r"Naming convention including %\(constraint_name\)s token "
            "requires that constraint is explicitly named.",
            schema.AddConstraint(ck).compile,
        )

    def test_column_attached_ck_name(self):
        m = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )
        ck = CheckConstraint("x > 5", name="x1")
        Table("t", m, Column("x", ck))
        eq_(ck.name, "ck_t_x1")

    def test_table_attached_ck_name(self):
        m = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )
        ck = CheckConstraint("x > 5", name="x1")
        Table("t", m, Column("x", Integer), ck)
        eq_(ck.name, "ck_t_x1")

    def test_uq_name_already_conv(self):
        m = MetaData(
            naming_convention={
                "uq": "uq_%(constraint_name)s_%(column_0_name)s"
            }
        )

        t = Table("mytable", m)
        uq = UniqueConstraint(name=naming.conv("my_special_key"))

        t.append_constraint(uq)
        eq_(uq.name, "my_special_key")

    def test_fk_name_schema(self):
        # schema qualifier does not leak into the generated name
        u1 = self._fixture(
            naming_convention={
                "fk": "fk_%(table_name)s_%(column_0_name)s_"
                "%(referred_table_name)s_%(referred_column_0_name)s"
            },
            table_schema="foo",
        )
        m1 = u1.metadata
        a1 = Table(
            "address",
            m1,
            Column("id", Integer, primary_key=True),
            Column("user_id", Integer),
            Column("user_version_id", Integer),
        )
        fk = ForeignKeyConstraint(
            ["user_id", "user_version_id"], ["foo.user.id", "foo.user.version"]
        )
        a1.append_constraint(fk)
        eq_(fk.name, "fk_address_user_id_user_id")

    def test_fk_attrs(self):
        u1 = self._fixture(
            naming_convention={
                "fk": "fk_%(table_name)s_%(column_0_name)s_"
                "%(referred_table_name)s_%(referred_column_0_name)s"
            }
        )
        m1 = u1.metadata
        a1 = Table(
            "address",
            m1,
            Column("id", Integer, primary_key=True),
            Column("user_id", Integer),
            Column("user_version_id", Integer),
        )
        fk = ForeignKeyConstraint(
            ["user_id", "user_version_id"], ["user.id", "user.version"]
        )
        a1.append_constraint(fk)
        eq_(fk.name, "fk_address_user_id_user_id")

    def test_custom(self):
        # user-defined callable tokens in the convention dict
        def key_hash(const, table):
            return "HASH_%s" % table.name

        u1 = self._fixture(
            naming_convention={
                "fk": "fk_%(table_name)s_%(key_hash)s",
                "key_hash": key_hash,
            }
        )
        m1 = u1.metadata
        a1 = Table(
            "address",
            m1,
            Column("id", Integer, primary_key=True),
            Column("user_id", Integer),
            Column("user_version_id", Integer),
        )
        fk = ForeignKeyConstraint(
            ["user_id", "user_version_id"], ["user.id", "user.version"]
        )
        a1.append_constraint(fk)
        eq_(fk.name, "fk_address_HASH_address")

    def test_schematype_ck_name_boolean(self):
        m1 = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )

        u1 = Table(
            "user",
            m1,
            Column("x", Boolean(name="foo", create_constraint=True)),
        )

        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" ('
            "x BOOLEAN, "
            "CONSTRAINT ck_user_foo CHECK (x IN (0, 1))"
            ")",
        )

        # test no side effects from first compile
        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" ('
            "x BOOLEAN, "
            "CONSTRAINT ck_user_foo CHECK (x IN (0, 1))"
            ")",
        )

    def test_schematype_ck_name_boolean_not_on_name(self):
        m1 = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}
        )

        u1 = Table("user", m1, Column("x", Boolean(create_constraint=True)))

        # constraint is not hit
        is_(
            [c for c in u1.constraints if isinstance(c, CheckConstraint)][
                0
            ].name,
            _NONE_NAME,
        )

        # but is hit at compile time
        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" ('
            "x BOOLEAN, "
            "CONSTRAINT ck_user_x CHECK (x IN (0, 1))"
            ")",
        )

    def test_schematype_ck_name_enum(self):
        m1 = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )

        u1 = Table(
            "user",
            m1,
            Column("x", Enum("a", "b", name="foo", create_constraint=True)),
        )

        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" ('
            "x VARCHAR(1), "
            "CONSTRAINT ck_user_foo CHECK (x IN ('a', 'b'))"
            ")",
        )

        # test no side effects from first compile
        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" ('
            "x VARCHAR(1), "
            "CONSTRAINT ck_user_foo CHECK (x IN ('a', 'b'))"
            ")",
        )

    def test_schematype_ck_name_propagate_conv(self):
        m1 = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )

        u1 = Table(
            "user",
            m1,
            Column(
                "x",
                Enum(
                    "a", "b", name=naming.conv("foo"), create_constraint=True
                ),
            ),
        )

        eq_(
            [c for c in u1.constraints if isinstance(c, CheckConstraint)][
                0
            ].name,
            "foo",
        )

        # but is hit at compile time
        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" ('
            "x VARCHAR(1), "
            "CONSTRAINT foo CHECK (x IN ('a', 'b'))"
            ")",
        )

    def test_schematype_ck_name_boolean_no_name(self):
        m1 = MetaData(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )

        u1 = Table("user", m1, Column("x", Boolean(create_constraint=True)))

        # constraint gets special _defer_none_name
        is_(
            [c for c in u1.constraints if isinstance(c, CheckConstraint)][
                0
            ].name,
            _NONE_NAME,
        )

        # no issue with native boolean
        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" (' "x BOOLEAN" ")",
            dialect="postgresql",
        )

        assert_raises_message(
            exc.InvalidRequestError,
            r"Naming convention including \%\(constraint_name\)s token "
            r"requires that constraint is explicitly named.",
            schema.CreateTable(u1).compile,
            dialect=default.DefaultDialect(),
        )

    def test_schematype_no_ck_name_boolean_no_name(self):
        m1 = MetaData()  # no naming convention

        u1 = Table("user", m1, Column("x", Boolean(create_constraint=True)))

        # constraint gets special _defer_none_name
        is_(
            [c for c in u1.constraints if isinstance(c, CheckConstraint)][
                0
            ].name,
            _NONE_NAME,
        )

        self.assert_compile(
            schema.CreateTable(u1),
            'CREATE TABLE "user" (x BOOLEAN, CHECK (x IN (0, 1)))',
        )

    def test_ck_constraint_redundant_event(self):
        # re-appending the same constraint must not re-apply the convention
        u1 = self._fixture(
            naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
        )

        ck1 = CheckConstraint(u1.c.version > 3, name="foo")
        u1.append_constraint(ck1)
        u1.append_constraint(ck1)
        u1.append_constraint(ck1)

        eq_(ck1.name, "ck_user_foo")

    def test_pickle_metadata(self):
        # naming_convention must survive a pickle round trip
        m = MetaData(naming_convention={"pk": "%(table_name)s_pk"})

        m2 = pickle.loads(pickle.dumps(m))

        eq_(m2.naming_convention, {"pk": "%(table_name)s_pk"})

        t2a = Table("t2", m, Column("id", Integer, primary_key=True))
        t2b = Table("t2", m2, Column("id", Integer, primary_key=True))

        eq_(t2a.primary_key.name, t2b.primary_key.name)
        eq_(t2b.primary_key.name, "t2_pk")

    def test_expression_index(self):
        m = MetaData(naming_convention={"ix": "ix_%(column_0_label)s"})
        t = Table("t", m, Column("q", Integer), Column("p", Integer))
        ix = Index(None, t.c.q + 5)
        t.append_constraint(ix)

        # huh.  pretty cool
        self.assert_compile(
            CreateIndex(ix), "CREATE INDEX ix_t_q ON t (q + 5)"
        )
class CopyDialectOptionsTest(fixtures.TestBase):
    """Tests that per-dialect keyword arguments are carried over when a
    Table (and its columns, constraints and indexes) is copied via
    ``Table.to_metadata``."""

    @contextmanager
    def _fixture(self):
        """Register a throwaway dialect declaring one argument per
        construct type, by patching the dialect registry loader."""
        from sqlalchemy.engine.default import DefaultDialect

        class CopyDialectOptionsTestDialect(DefaultDialect):
            construct_arguments = [
                (Table, {"some_table_arg": None}),
                (Column, {"some_column_arg": None}),
                (Index, {"some_index_arg": None}),
                (PrimaryKeyConstraint, {"some_pk_arg": None}),
                (UniqueConstraint, {"some_uq_arg": None}),
            ]

        def load(dialect_name):
            if dialect_name == "copydialectoptionstest":
                return CopyDialectOptionsTestDialect
            else:
                raise exc.NoSuchModuleError("no dialect %r" % dialect_name)

        with mock.patch("sqlalchemy.dialects.registry.load", load):
            yield

    @classmethod
    def check_dialect_options_(cls, t):
        """Assert every dialect kwarg set up by the test is present on *t*."""
        eq_(
            t.dialect_kwargs["copydialectoptionstest_some_table_arg"],
            "a1",
        )
        eq_(
            t.c.foo.dialect_kwargs["copydialectoptionstest_some_column_arg"],
            "a2",
        )
        eq_(
            t.primary_key.dialect_kwargs["copydialectoptionstest_some_pk_arg"],
            "a3",
        )
        eq_(
            list(t.indexes)[0].dialect_kwargs[
                "copydialectoptionstest_some_index_arg"
            ],
            "a4",
        )
        eq_(
            list(c for c in t.constraints if isinstance(c, UniqueConstraint))[
                0
            ].dialect_kwargs["copydialectoptionstest_some_uq_arg"],
            "a5",
        )

    def test_dialect_options_are_copied(self):
        with self._fixture():
            t1 = Table(
                "t",
                MetaData(),
                Column(
                    "foo",
                    Integer,
                    copydialectoptionstest_some_column_arg="a2",
                ),
                Column("bar", Integer),
                PrimaryKeyConstraint(
                    "foo", copydialectoptionstest_some_pk_arg="a3"
                ),
                UniqueConstraint(
                    "bar", copydialectoptionstest_some_uq_arg="a5"
                ),
                copydialectoptionstest_some_table_arg="a1",
            )
            Index(
                "idx",
                t1.c.foo,
                copydialectoptionstest_some_index_arg="a4",
            )

            self.check_dialect_options_(t1)

            m2 = MetaData()
            t2 = t1.to_metadata(m2)  # make a copy
            self.check_dialect_options_(t2)
| {
"content_hash": "f61c2e8708aaef4cc69cedcf4a4b1d19",
"timestamp": "",
"source": "github",
"line_count": 5548,
"max_line_length": 79,
"avg_line_length": 31.78226387887527,
"alnum_prop": 0.518374846876276,
"repo_name": "monetate/sqlalchemy",
"id": "08502b8bbebbc4ffe3050466f39f2e675d045cbf",
"size": "176328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/sql/test_metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "49142"
},
{
"name": "Python",
"bytes": "11790244"
}
],
"symlink_target": ""
} |
import logging
from idl.matter_idl_types import *
from typing import Optional, Union, List
from .context import Context, IdlPostProcessor
from .parsing import ParseInt, AttrsToAccessPrivilege, AttrsToAttribute
from .base import HandledDepth, BaseHandler
class ClusterNameHandler(BaseHandler):
    """Handles /configurator/cluster/name elements.

    The tag's text content, with all spaces removed, becomes the
    cluster's name.
    """

    def __init__(self, context: Context, cluster: Cluster):
        super().__init__(context, handled=HandledDepth.SINGLE_TAG)
        self._cluster = cluster

    def HandleContent(self, content):
        cluster_name = content.replace(' ', '')
        self._cluster.name = cluster_name
class AttributeDescriptionHandler(BaseHandler):
    """Handles /configurator/cluster/attribute/description elements.

    The description text, with all spaces removed, is used as the
    attribute definition's name.
    """

    def __init__(self, context: Context, attribute: Attribute):
        super().__init__(context, handled=HandledDepth.SINGLE_TAG)
        self._attribute = attribute

    def HandleContent(self, content: str):
        attribute_name = content.replace(' ', '')
        self._attribute.definition.name = attribute_name
class ClusterCodeHandler(BaseHandler):
    """Handles /configurator/cluster/code elements.

    Parses the tag's text as an integer and stores it as the cluster code.
    """

    def __init__(self, context: Context, cluster: Cluster):
        super().__init__(context, handled=HandledDepth.SINGLE_TAG)
        self._cluster = cluster

    def HandleContent(self, content: str):
        code = ParseInt(content)
        self._cluster.code = code
class EventHandler(BaseHandler):
    """Handles /configurator/cluster/event elements.

    Builds up an Event from the element attributes and nested tags,
    appending it to the owning cluster once processing completes.
    """

    def __init__(self, context: Context, cluster: Cluster, attrs):
        super().__init__(context)
        self._cluster = cluster

        # Map the XML priority attribute onto the IDL enumeration.
        priority_name = attrs['priority']
        priority_by_name = {
            'debug': EventPriority.DEBUG,
            'info': EventPriority.INFO,
            'critical': EventPriority.CRITICAL,
        }
        try:
            priority = priority_by_name[priority_name]
        except KeyError:
            raise Exception("Unknown event priority: %s" % priority_name)

        self._event = Event(
            priority=priority,
            code=ParseInt(attrs['code']),
            name=attrs['name'],
            fields=[],
        )

        if attrs.get('isFabricSensitive', "false").lower() == 'true':
            self._event.qualities |= EventQuality.FABRIC_SENSITIVE

    def GetNextProcessor(self, name: str, attrs):
        tag = name.lower()

        if tag == 'field':
            field_type = DataType(name=attrs['type'])
            if 'length' in attrs:
                field_type.max_length = ParseInt(attrs['length'])

            field = Field(
                data_type=field_type,
                code=ParseInt(attrs['id']),
                name=attrs['name'],
                is_list=(attrs.get('array', 'false').lower() == 'true'),
            )

            if attrs.get('optional', "false").lower() == 'true':
                field.qualities |= FieldQuality.OPTIONAL

            if attrs.get('isNullable', "false").lower() == 'true':
                field.qualities |= FieldQuality.NULLABLE

            self._event.fields.append(field)
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)

        if tag == 'access':
            self._event.readacl = AttrsToAccessPrivilege(attrs)
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)

        if tag == 'description':
            # description content is not used for events
            return BaseHandler(self.context, handled=HandledDepth.ENTIRE_TREE)

        return BaseHandler(self.context)

    def EndProcessing(self):
        self._cluster.events.append(self._event)
class AttributeHandler(BaseHandler):
    """Handles /configurator/cluster/attribute elements.

    The attribute name may come either from the element's text content
    or from a nested <description> tag (see HandleContent); the parsed
    attribute is appended to the cluster when processing ends.
    """

    def __init__(self, context: Context, cluster: Cluster, attrs):
        super().__init__(context)
        self._cluster = cluster
        self._attribute = AttrsToAttribute(attrs)

    def GetNextProcessor(self, name: str, attrs):
        if name.lower() == 'access':
            # Modifier not currently used: fabric scoped exists on the structure itself.
            if 'modifier' in attrs:
                if attrs['modifier'] != 'fabric-scoped':
                    raise Exception("UNKNOWN MODIFIER: %s" % attrs['modifier'])

            if ('role' in attrs) or ('privilege' in attrs):
                role = AttrsToAccessPrivilege(attrs)

                if attrs['op'] == 'read':
                    self._attribute.readacl = role
                elif attrs['op'] == 'write':
                    self._attribute.writeacl = role
                else:
                    logging.error("Unknown access: %r" % attrs['op'])

            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'description':
            return AttributeDescriptionHandler(self.context, self._attribute)
        else:
            return BaseHandler(self.context)

    def HandleContent(self, content: str):
        # Content generally is the name EXCEPT if access controls
        # exist, in which case `description` contains the name
        content = content.strip()
        if content and not self._attribute.definition.name:
            self._attribute.definition.name = content

    def EndProcessing(self):
        # A name must have been established by content or <description>.
        if self._attribute.definition.name is None:
            raise Exception("Name for attribute was not parsed.")

        self._cluster.attributes.append(self._attribute)
class StructHandler(BaseHandler, IdlPostProcessor):
    """Handling /configurator/struct elements.

    Structs may be attached to one or more clusters (via nested <cluster>
    tags) or, lacking any cluster codes, to the IDL top level. Attachment
    happens during IDL post-processing, once all clusters are known.
    """

    def __init__(self, context: Context, attrs):
        super().__init__(context)

        # if set, struct belongs to a specific cluster
        self._cluster_codes = set()
        self._struct = Struct(name=attrs['name'], fields=[])
        self._field_index = 0

        # The following are not set:
        #    - tag not set because not a request/response
        #    - code not set because not a response

        if attrs.get('isFabricScoped', "false").lower() == 'true':
            self._struct.qualities |= StructQuality.FABRIC_SCOPED

    def GetNextProcessor(self, name: str, attrs):
        if name.lower() == 'item':
            data_type = DataType(
                name=attrs['type']
            )

            if 'fieldId' in attrs:
                self._field_index = ParseInt(attrs['fieldId'])
            else:
                # NOTE: an explicit code does NOT exist, so the number is
                # incremental here. This seems a deficiency in the XML format.
                self._field_index += 1

            if 'length' in attrs:
                data_type.max_length = ParseInt(attrs['length'])

            field = Field(
                data_type=data_type,
                code=self._field_index,
                name=attrs['name'],
                is_list=(attrs.get('array', 'false').lower() == 'true'),
            )

            if attrs.get('optional', "false").lower() == 'true':
                field.qualities |= FieldQuality.OPTIONAL

            if attrs.get('isNullable', "false").lower() == 'true':
                field.qualities |= FieldQuality.NULLABLE

            if attrs.get('isFabricSensitive', "false").lower() == 'true':
                field.qualities |= FieldQuality.FABRIC_SENSITIVE

            self._struct.fields.append(field)

            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'cluster':
            self._cluster_codes.add(ParseInt(attrs['code']))
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        else:
            return BaseHandler(self.context)

    def FinalizeProcessing(self, idl: Idl):
        # We have two choices of adding a struct:
        #   - inside a cluster if a code exists
        #   - inside top level if no codes were associated
        if self._cluster_codes:
            for code in self._cluster_codes:
                found = False
                for c in idl.clusters:
                    if c.code == code:
                        c.structs.append(self._struct)
                        found = True

                if not found:
                    # Fixed: message previously said "Enum" (copy/paste from
                    # EnumHandler); this handler processes structs.
                    logging.error(
                        'Struct %s could not find cluster (code %d/0x%X)' %
                        (self._struct.name, code, code))
        else:
            idl.structs.append(self._struct)

    def EndProcessing(self):
        # Defer cluster attachment until the whole IDL has been parsed.
        self.context.AddIdlPostProcessor(self)
class EnumHandler(BaseHandler, IdlPostProcessor):
    """Handling /configurator/enum elements.

    An enum may belong to at most one cluster (a nested <cluster> tag);
    with no cluster code it is attached to the IDL top level during
    post-processing.
    """

    def __init__(self, context: Context, attrs):
        super().__init__(context)

        self._cluster_code = None  # if set, enum belongs to a specific cluster
        self._enum = Enum(name=attrs['name'], base_type=attrs['type'], entries=[])

    def GetNextProcessor(self, name, attrs):
        if name.lower() == 'item':
            self._enum.entries.append(ConstantEntry(
                name=attrs['name'],
                code=ParseInt(attrs['value'])
            ))
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'cluster':
            # Unlike bitmaps/structs, an enum is limited to a single cluster.
            if self._cluster_code is not None:
                raise Exception('Multiple cluster codes for enum %s' % self._enum.name)
            self._cluster_code = ParseInt(attrs['code'])
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        else:
            return BaseHandler(self.context)

    def FinalizeProcessing(self, idl: Idl):
        # We have two choices of adding an enum:
        #   - inside a cluster if a code exists
        #   - inside top level if a code does not exist
        if self._cluster_code is None:
            idl.enums.append(self._enum)
        else:
            found = False
            for c in idl.clusters:
                if c.code == self._cluster_code:
                    c.enums.append(self._enum)
                    found = True

            if not found:
                logging.error('Enum %s could not find its cluster (code %d/0x%X)' %
                              (self._enum.name, self._cluster_code, self._cluster_code))

    def EndProcessing(self):
        # Defer cluster attachment until the whole IDL has been parsed.
        self.context.AddIdlPostProcessor(self)
class BitmapHandler(BaseHandler):
    """Handling /configurator/bitmap elements.

    A bitmap may be shared by several clusters; it is attached to every
    matching cluster during IDL post-processing.
    """

    def __init__(self, context: Context, attrs):
        super().__init__(context)
        self._cluster_codes = set()
        self._bitmap = Bitmap(name=attrs['name'], base_type=attrs['type'], entries=[])

    def GetNextProcessor(self, name, attrs):
        if name.lower() == 'cluster':
            # Multiple clusters may be associated, like IasZoneStatus
            self._cluster_codes.add(ParseInt(attrs['code']))
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'field':
            self._bitmap.entries.append(ConstantEntry(
                name=attrs['name'],
                code=ParseInt(attrs['mask'])
            ))
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'description':
            return BaseHandler(self.context, handled=HandledDepth.ENTIRE_TREE)
        else:
            return BaseHandler(self.context)

    def FinalizeProcessing(self, idl: Idl):
        # We have two choices of adding a bitmap:
        #   - inside a cluster if a code exists
        #   - inside top level if a code does not exist
        if not self._cluster_codes:
            # Log only instead of critical, as not all of our XML is well formed.
            # For example at the time of writing this, SwitchFeature in switch-cluster.xml
            # did not have a code associated with it.
            logging.error("Bitmap %r has no cluster codes" % self._bitmap)
            return

        for code in self._cluster_codes:
            found = False
            for c in idl.clusters:
                if c.code == code:
                    c.bitmaps.append(self._bitmap)
                    found = True
            if not found:
                logging.error('Bitmap %s could not find its cluster (code %d/0x%X)' %
                              (self._bitmap.name, code, code))

    def EndProcessing(self):
        # Defer cluster attachment until the whole IDL has been parsed.
        self.context.AddIdlPostProcessor(self)
class CommandHandler(BaseHandler):
    """Handles /configurator/cluster/command elements.

    A command whose source is the client produces both a Command and a
    request Struct; any other source produces only a response Struct
    (self._command stays None in that case).
    """

    def __init__(self, context: Context, cluster: Cluster, attrs):
        super().__init__(context)
        self._cluster = cluster
        self._command = None
        self._struct = Struct(name=attrs['name'], fields=[])
        self._field_index = 0  # commands DO NOT support field index it seems

        if attrs['source'].lower() == 'client':
            self._struct.tag = StructTag.REQUEST

            name = attrs['name']

            # Request struct name always carries a "Request" suffix.
            if name.endswith('Request'):
                request_name = name
            else:
                request_name = name + 'Request'

            self._struct.name = request_name

            if 'response' in attrs:
                response_name = attrs['response']
            else:
                response_name = 'DefaultResponse'

            self._command = Command(
                name=name,
                code=ParseInt(attrs['code']),
                input_param=request_name,
                output_param=response_name,
            )

            # Fixed: these comparisons now lowercase the attribute value,
            # consistent with every other boolean attribute in this module.
            if attrs.get('isFabricScoped', 'false').lower() == 'true':
                self._command.qualities |= CommandQuality.FABRIC_SCOPED

            if attrs.get('mustUseTimedInvoke', 'false').lower() == 'true':
                self._command.qualities |= CommandQuality.TIMED_INVOKE
        else:
            self._struct.tag = StructTag.RESPONSE
            self._struct.code = ParseInt(attrs['code'])

    def GetArgumentField(self, attrs):
        """Build a Field for an <arg> tag; codes are assigned sequentially."""
        data_type = DataType(name=attrs['type'])

        if 'length' in attrs:
            data_type.max_length = ParseInt(attrs['length'])

        self._field_index += 1

        field = Field(
            data_type=data_type,
            code=self._field_index,
            name=attrs['name'],
            # Fixed: lowercase for consistency with the other handlers.
            is_list=(attrs.get('array', 'false').lower() == 'true')
        )

        if attrs.get('optional', "false").lower() == 'true':
            field.qualities |= FieldQuality.OPTIONAL

        if attrs.get('isNullable', "false").lower() == 'true':
            field.qualities |= FieldQuality.NULLABLE

        return field

    def GetNextProcessor(self, name: str, attrs):
        if name.lower() == 'access':
            if attrs['op'] != 'invoke':
                raise Exception('Unknown access for %r' % self._struct)

            if self._command:
                self._command.invokeacl = AttrsToAccessPrivilege(attrs)
            else:
                logging.warning("Ignored access role for reply %r" % self._struct)
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'arg':
            self._struct.fields.append(self.GetArgumentField(attrs))
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        elif name.lower() == 'description':
            return BaseHandler(self.context, handled=HandledDepth.ENTIRE_TREE)
        else:
            return BaseHandler(self.context)

    def EndProcessing(self):
        if self._struct.fields:
            self._cluster.structs.append(self._struct)
        elif self._command:
            # Fixed: previously this branch dereferenced self._command
            # unconditionally, crashing with AttributeError for a
            # server-sourced (response) command that has no arguments.
            # no input arguments: drop the request parameter entirely
            self._command.input_param = None

        if self._command:
            self._cluster.commands.append(self._command)
class ClusterGlobalAttributeHandler(BaseHandler):
    """Handles /configurator/cluster/globalAttribute elements."""

    def __init__(self, context: Context, cluster: Cluster, code: int):
        super().__init__(context)
        self._cluster = cluster
        self._code = code

    def GetNextProcessor(self, name: str, attrs):
        if name.lower() != 'featurebit':
            return BaseHandler(self.context)
        # The meaning of featurebits is unclear; likely a bitmap should be
        # created here. Only one such example exists currently:
        # door-lock-cluster.xml
        logging.info('Ignoring featurebit tag for global attribute 0x%X (%d)' % (self._code, self._code))
        return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)

    def EndProcessing(self):
        # Attach the globally-registered attribute definition for this code.
        self._cluster.attributes.append(self.context.GetGlobalAttribute(self._code))
class ClusterHandler(BaseHandler):
    """Handles /configurator/cluster elements."""

    def __init__(self, context: Context, idl: Idl):
        super().__init__(context)
        self._idl = idl
        self._cluster = Cluster(
            side=ClusterSide.CLIENT,
            name=None,
            code=None,
            parse_meta=context.GetCurrentLocationMeta()
        )

    def GetNextProcessor(self, name: str, attrs):
        """Dispatch a child tag of a <cluster> element to its handler."""
        tag = name.lower()
        if tag == 'code':
            return ClusterCodeHandler(self.context, self._cluster)
        if tag == 'name':
            return ClusterNameHandler(self.context, self._cluster)
        if tag == 'attribute':
            return AttributeHandler(self.context, self._cluster, attrs)
        if tag == 'event':
            return EventHandler(self.context, self._cluster, attrs)
        if tag == 'globalattribute':
            # The 'side' and 'value' attributes do not seem useful, so only
            # the code is forwarded.
            return ClusterGlobalAttributeHandler(self.context, self._cluster, ParseInt(attrs['code']))
        if tag == 'command':
            return CommandHandler(self.context, self._cluster, attrs)
        if tag in ('define', 'description', 'domain', 'tag', 'client', 'server'):
            # NOTE: client/server COULD produce separate definitions of each,
            # but the usefulness of this is unclear as the definitions are
            # likely identical and matter has no concept of differences
            # between the two.
            return BaseHandler(self.context, handled=HandledDepth.ENTIRE_TREE)
        return BaseHandler(self.context)

    def EndProcessing(self):
        """Validate required fields and register the cluster on the IDL."""
        if self._cluster.name is None:
            raise Exception("Missing cluster name")
        if self._cluster.code is None:
            raise Exception("Missing cluster code")
        self._idl.clusters.append(self._cluster)
# Cluster extensions have extra bits for existing clusters. They can only be
# loaded IF the underlying cluster exists.
class ClusterExtensionHandler(ClusterHandler, IdlPostProcessor):
    """Handling /configurator/clusterExtension elements."""

    def __init__(self, context: Context, code: int):
        # NOTE: the IDL argument is deliberately None so that ClusterHandler
        # cannot inadvertently change it (it would be invalid anyway).
        super().__init__(context, None)
        self._cluster_code = code

    def EndProcessing(self):
        # The target cluster may not be parsed yet, so defer the merge until
        # every cluster is known.
        self.context.AddIdlPostProcessor(self)

    def FinalizeProcessing(self, idl: Idl):
        """Merge the parsed extension into every cluster with a matching code."""
        matched = False
        for existing in idl.clusters:
            if existing.code != self._cluster_code:
                continue
            matched = True
            # Append everything that can be appended
            existing.enums.extend(self._cluster.enums)
            existing.bitmaps.extend(self._cluster.bitmaps)
            existing.events.extend(self._cluster.events)
            existing.attributes.extend(self._cluster.attributes)
            existing.structs.extend(self._cluster.structs)
            existing.commands.extend(self._cluster.commands)
        if not matched:
            logging.error('Could not extend cluster 0x%X (%d): cluster not found' %
                          (self._cluster_code, self._cluster_code))
class GlobalAttributeHandler(BaseHandler):
    """Handling configurator/global/globalAttribute elements."""

    def __init__(self, context: Context, attribute: Attribute):
        super().__init__(context, handled=HandledDepth.SINGLE_TAG)
        self._attribute = attribute

    def HandleContent(self, content: str):
        # Tag content is generally the attribute name, EXCEPT when access
        # controls exist (then `description` contains the name). Global
        # attributes currently have no access controls, so that case is not
        # handled here.
        text = content.strip()
        if not text:
            return
        if self._attribute.definition.name:
            return
        self._attribute.definition.name = text

    def EndProcessing(self):
        if self._attribute.definition.name is None:
            raise Exception("Name for attribute was not parsed.")
        self.context.AddGlobalAttribute(self._attribute)
class GlobalHandler(BaseHandler):
    """Handling configurator/global elements."""

    def __init__(self, context: Context):
        super().__init__(context, handled=HandledDepth.SINGLE_TAG)

    def GetNextProcessor(self, name, attrs):
        if name.lower() != 'attribute':
            return BaseHandler(self.context)
        if attrs['side'].lower() == 'client':
            # A 'server' equivalent entry is expected as well, so the
            # client-side attribute carries no extra information.
            logging.debug('Ignoring global client-side attribute %s' % (attrs['code']))
            return BaseHandler(self.context, handled=HandledDepth.SINGLE_TAG)
        return GlobalAttributeHandler(self.context, AttrsToAttribute(attrs))
class ConfiguratorHandler(BaseHandler):
    """Handling /configurator elements by dispatching child tags."""

    def __init__(self, context: Context, idl: Idl):
        super().__init__(context, handled=HandledDepth.SINGLE_TAG)
        self._idl = idl

    def GetNextProcessor(self, name: str, attrs):
        tag = name.lower()
        if tag == 'cluster':
            return ClusterHandler(self.context, self._idl)
        if tag == 'enum':
            return EnumHandler(self.context, attrs)
        if tag == 'struct':
            return StructHandler(self.context, attrs)
        if tag == 'bitmap':
            return BitmapHandler(self.context, attrs)
        if tag == 'clusterextension':
            return ClusterExtensionHandler(self.context, ParseInt(attrs['code']))
        if tag == 'global':
            return GlobalHandler(self.context)
        if tag in ('domain', 'accesscontrol', 'atomic', 'devicetype'):
            # Fully-skipped subtrees:
            #  - domain: grouping only.
            #  - accessControl: operation/role/modifier and generally only a
            #    description; does not seem useful to parse.
            #  - atomic: list of types in 'chip-types'. Matches a type id to a
            #    description, size and some discrete/analog flags. Could
            #    eventually preload base types, however the matter idl
            #    generator logic has hardcoded sizing as well.
            #  - deviceType: device list in 'matter-devices.xml'. Useful for
            #    conformance tests, but not usable for serialization logic.
            return BaseHandler(self.context, handled=HandledDepth.ENTIRE_TREE)
        return BaseHandler(self.context)
| {
"content_hash": "5c72e7fe76671138d644fa2e55ac0edf",
"timestamp": "",
"source": "github",
"line_count": 600,
"max_line_length": 118,
"avg_line_length": 39.07333333333333,
"alnum_prop": 0.5937553318546324,
"repo_name": "project-chip/connectedhomeip",
"id": "6048b271ecf3ace19e88cb159cadf8779939efa1",
"size": "24031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/idl/zapxml/handlers/handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
} |
"""
Various utilities
"""
from django.conf import settings
from django.contrib.sites.models import Site
from django.template import Variable, TemplateSyntaxError, VariableDoesNotExist
def get_site_metas(with_static=False, with_media=False, extra=None):
    """
    Return metas from the current *Site* and settings.

    Added Site metas will be callable in templates like this ``SITE.themetaname``.

    This can be used in code out of a Django request (like in management
    commands) or in a context processor to get the *Site* urls.

    Default metas returned:

    * name: current *Site* entry name;
    * domain: current *Site* entry domain;
    * web_url: current *Site* entry domain prefixed with the http protocol.

    Optionally it can also return ``STATIC_URL`` and ``MEDIA_URL`` if needed
    (like out of Django requests).

    :param with_static: if True, include ``STATIC_URL`` in the result.
    :param with_media: if True, include ``MEDIA_URL`` in the result.
    :param extra: optional dict of additional metas merged into the result
        (may override the defaults).
    """
    # Fix: the original signature used a mutable default argument
    # (``extra={}``); that single shared dict would leak state between calls
    # if any caller ever mutated it. Use None as the sentinel instead.
    if extra is None:
        extra = {}
    site_current = Site.objects.get_current()
    metas = {
        'SITE': {
            'name': site_current.name,
            'domain': site_current.domain,
            # TODO: hardcoded scheme is problematic in a HTTPS environment
            'web_url': 'http://%s' % site_current.domain,
        }
    }
    if with_media:
        metas['MEDIA_URL'] = getattr(settings, 'MEDIA_URL', '')
    if with_static:
        metas['STATIC_URL'] = getattr(settings, 'STATIC_URL', '')
    metas.update(extra)
    return metas
def site_metas(request):
    """
    Context processor adding the current *Site* metas to the context.

    The ``request`` argument is required by the context-processor protocol
    but is not used.
    """
    return get_site_metas()
| {
"content_hash": "db94f842c8c10aade1bbccc18494754b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 113,
"avg_line_length": 33.577777777777776,
"alnum_prop": 0.6472534745201853,
"repo_name": "sveetch/DjangoSveetchies",
"id": "f736e224f0c062ebbdcd4188cee371fe068bd5b7",
"size": "1535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "173303"
},
{
"name": "Python",
"bytes": "22686"
},
{
"name": "Ruby",
"bytes": "959"
}
],
"symlink_target": ""
} |
__author__ = "Magdalena Rother, Tomasz Puton, Kristian Rother"
__copyright__ = "Copyright 2008, The Moderna Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__maintainer__ = "Magdalena Rother"
__email__ = "mmusiel@genesilico.pl"
__status__ = "Production"
"""
A procedure for calculating stacking of RNA nucleotides.
The definition of base stacking from Major & Lemieux
MC-Annotate paper (JMB 2001, 308, p.919ff):
"Stacking between two nitrogen bases is considered
if the distance between their rings is less
than 5.5 Ang., the angle between the two normals to
the base planes is inferior to 30 deg., and the angle
between the normal of one base plane and the vector
between the center of the rings from the two
bases is less than 40 deg."
There are two classes defined here:
- ResidueVector
- StackingCalculator
The latter class should be used for calculating stacking. There are two
public methods inside StackingCalculator class that can be used
for calculating stacking:
- process_pdbfile(file_name, chain_id='A') - which runs StackingCalculator
on the RNA from the 'file_name'.
The second parameter is optional and has to be set, if the chain ID
of RNA from PDB file is different than 'A'.
"""
import sys
from numpy import array, add, cross, sqrt, arccos
from rna_tools.tools.mini_moderna3.moderna import *
from rna_tools.tools.mini_moderna3.moderna.Constants import NORMAL_SUPPORT, ARCPI
# Stacking type codes, keyed by the (straight, is_up) pair computed in
# ResidueVector.get_stacking: `straight` is True when the two base-plane
# normals point the same way, `is_up` when following base1's normal moves
# toward base2's ring center.
STACKINGS = {
    (True, True): '>>',
    (True, False): '<<',
    (False, False): '<>',
    (False, True): '><',
}
# vector placeholder functions
# code snatched from Scientific.Geometry
def angle(vec_a, vec_b):
    """Return the angle between *vec_a* and *vec_b*, scaled by ARCPI.

    (ARCPI presumably converts radians to degrees -- see Constants; the
    thresholds used by callers are in degrees.)
    """
    dot_product = add.reduce(vec_a * vec_b)
    length_product = sqrt(add.reduce(vec_a * vec_a) * add.reduce(vec_b * vec_b))
    # Clamp against floating-point drift outside arccos' domain [-1, 1].
    cos_value = max(-1., min(1., dot_product / length_product))
    return arccos(cos_value) * ARCPI
class StackingInteraction(object):
    """Holds one detected stacking contact between two residues."""

    def __init__(self, resi1, resi2, stack_type):
        """Store the two residues and their stacking type code."""
        self.resi1 = resi1
        self.resi2 = resi2
        self.type = stack_type

    def __repr__(self):
        parts = (self.resi1.identifier, self.type, self.resi2.identifier)
        return "%s %s %s" % parts
class ResidueVector(object):
    """
    Residue class with center vector and normal vector for stacking calculation.
    """

    def __init__(self, residue):
        """
        Creates a dictionary of vectors for each atom from a ModernaResidue.
        """
        self.residue = residue
        self.atoms = {}
        for atom in residue.get_list():
            atom_name = atom.get_fullname().strip().upper()
            self.atoms[atom_name] = residue[atom_name].coord
        # Names of the base-ring atoms for this base type; None when the base
        # is not listed in NORMAL_SUPPORT.
        self.normal_set = NORMAL_SUPPORT.get(residue.original_base)
        self.normal = None  # unit normal of the base plane (set by calculate_vectors)
        self.center = None  # geometric center of the base ring (set by calculate_vectors)

    def is_valid(self):
        """Checks if all necessary atoms are present.

        Returns True when every ring atom is available, False when one is
        missing, and implicitly None (falsy) for unknown base types.
        """
        if self.normal_set:
            for name in self.normal_set:
                if name not in self.atoms:
                    return False
            return True

    def calculate_vectors(self):
        """
        Constructs the center and normal vectors for the nucleotide base.

        Sets ``self.center`` to the center of the six-ring of the base and
        ``self.normal`` to the unit normal of the base plane, according to
        the definition of Major & Thibault 2006.

        Assumes the residue has a complete set of atoms (see is_valid()).
        """
        # sum all six atom vectors up to get center point.
        asum = array([0.0, 0.0, 0.0])
        for atomname in self.normal_set:
            asum += self.atoms[atomname]
        self.center = asum / 6.0
        # get two pairs of atoms spanning a plane
        # and calculate the normal vector
        atoma = self.atoms[self.normal_set[1]] - self.atoms[self.normal_set[0]]
        atomb = self.atoms[self.normal_set[3]] - self.atoms[self.normal_set[2]]
        self.normal = cross(atoma, atomb)
        # normalize to unit length
        self.normal = self.normal/sqrt(add.reduce(self.normal*self.normal))

    def calc_angles(self, rvec):
        """
        Calculates whether the distance and angles between the vectors are OK.

        Returns a tuple of (dist, nn_angle, n1cc_angle, n2cc_angle), or
        (None, None, None, None) when the bases are too far apart or the
        normals' angle rules out stacking.
        """
        # calculate the distance between the two ring centers
        ccvec = rvec.center - self.center
        dist = sqrt(add.reduce(ccvec*ccvec))  # vector length
        # check whether the distance is small enough to allow stacking
        # (< 5.5 Angstrom per the MC-Annotate definition in the module docstring)
        if 0.0 < dist < 5.5:
            # check whether the normals are either near-parallel or
            # near-antiparallel (within 30 degrees of 0 or 180)
            nn_angle = angle(self.normal, rvec.normal)
            if (nn_angle < 30 or nn_angle > 150):
                n1cc_angle = angle(self.normal, ccvec)
                n2cc_angle = angle(rvec.normal, ccvec)
                return (dist, nn_angle, n1cc_angle, n2cc_angle)
        return (None, None, None, None)

    def get_stacking(self, rvec):
        """
        Returns a StackingInteraction with one of the types
        (<<, >>, <>, ><) for the two residues,
        or None if they are not stacked.
        """
        distance, nn_ang, n1cc_ang, n2cc_ang = self.calc_angles(rvec)
        # normal-to-center angle must be within 40 degrees of 0 or 180
        # (MC-Annotate definition, see module docstring)
        if distance and (n1cc_ang < 40 or n1cc_ang > 140 \
            or n2cc_ang < 40 or n2cc_ang > 140):
            # find out whether the normals are opposed or straight
            # (pointing in the same direction).
            if nn_ang < 30:
                straight = True
            elif nn_ang > 150:
                straight = False
            else:
                return None  # invalid normal angle
            # find out whether base2 is on top of base1
            # calculate whether the normal on base1 brings one closer to base2
            n1c2 = rvec.center - self.center - self.normal
            n1c2dist = sqrt(add.reduce(n1c2*n1c2))  # vector length
            is_up = n1c2dist < distance
            stacktype = STACKINGS[(straight, is_up)]
            return StackingInteraction(self.residue, \
                rvec.residue, stacktype)
class StackingCalculator:
    """
    Calculates stacking of nucleotide bases according
    to the definition of Major & Thibault 2006.

    Input are residues as parsed by Bio.PDB or Moderna; output pairs of
    residue objects with >> << <> >< stacking codes.
    """

    def get_stacking(self, moderna_struct):
        """
        Loop through all residues in a ModernaStructure object and return a
        de-duplicated list of StackingInteraction records (each holding the
        two residue objects and their stacking type: >>, <<, <> or ><).
        """
        unique = []
        vectors = self.calc_residue_vectors(moderna_struct)
        for interaction in self.calc_stacking(vectors):
            if interaction not in unique:
                unique.append(interaction)
        return unique

    def calc_residue_vectors(self, moderna_struct):
        """
        Precalculate a ResidueVector per residue to make calculations faster;
        residues with missing ring atoms are skipped.
        """
        vectors = []
        for residue in moderna_struct:
            candidate = ResidueVector(residue)
            if not candidate.is_valid():
                continue
            candidate.calculate_vectors()
            vectors.append(candidate)
        return vectors

    def calc_stacking(self, rvectors):
        """
        Generate stacking interactions for every unordered residue pair.
        """
        for first_index, first in enumerate(rvectors):
            for second in rvectors[first_index + 1:]:
                interaction = first.get_stacking(second)
                if interaction:
                    yield interaction
| {
"content_hash": "ff828914da79b44a75572d4ab49ee480",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 81,
"avg_line_length": 37.04651162790697,
"alnum_prop": 0.5989956057752668,
"repo_name": "m4rx9/rna-pdb-tools",
"id": "4dc6aeab50d1922cda413ae558b2ca96eb63e166",
"size": "8097",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rna_tools/tools/mini_moderna3/moderna/analyze/StackingCalculator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34107"
},
{
"name": "Shell",
"bytes": "1130"
}
],
"symlink_target": ""
} |
import time
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from django.urls import resolve
from django.views.generic import RedirectView, TemplateView, View
from . import views
class SimpleView(View):
    """
    A simple view with a docstring.
    """

    def get(self, request):
        # Fixture view implementing only GET.
        body = 'This is a simple view'
        return HttpResponse(body)
class SimplePostView(SimpleView):
    # Fixture that answers POST with the same handler as GET.
    post = SimpleView.get
class PostOnlyView(View):
    """Fixture view that implements only the POST handler."""

    def post(self, request):
        response = HttpResponse('This view only accepts POST')
        return response
class CustomizableView(SimpleView):
    # Class attribute that as_view() may override via keyword argument
    # (exercised by test_invalid_keyword_argument).
    parameter = {}
def decorator(view):
    """Mark *view* with an ``is_decorated`` attribute and return it unchanged."""
    setattr(view, 'is_decorated', True)
    return view
class DecoratedDispatchView(SimpleView):
    # Fixture whose dispatch() is wrapped by a decorator; used to check that
    # decorator-set attributes survive as_view() (test_dispatch_decoration).
    @decorator
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
    # Fixture supplying the template name via get_template_names() rather
    # than the template_name attribute.
    def get(self, request):
        return self.render_to_response({})

    def get_template_names(self):
        return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
    # Fixture supplying the template name via the class attribute.
    template_name = 'generic_views/about.html'

    def get(self, request):
        return self.render_to_response(context={})
class InstanceView(View):
    # Deliberately returns the view instance itself so tests can verify that
    # each request is served by a fresh instance (test_calling_more_than_once).
    def get(self, request):
        return self
class ViewTest(SimpleTestCase):
    """Tests for the base class-based View: as_view(), dispatch() and setup()."""

    # Shared factory; requests are cheap and stateless here.
    rf = RequestFactory()

    def _assert_simple(self, response):
        # Helper: response must be SimpleView's canonical 200 answer.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'This is a simple view')

    def test_no_init_kwargs(self):
        """
        A view can't be accidentally instantiated before deployment
        """
        msg = 'This method is available only on the class, not on instances.'
        with self.assertRaisesMessage(AttributeError, msg):
            SimpleView(key='value').as_view()

    def test_no_init_args(self):
        """
        A view can't be accidentally instantiated before deployment
        """
        msg = 'as_view() takes 1 positional argument but 2 were given'
        with self.assertRaisesMessage(TypeError, msg):
            SimpleView.as_view('value')

    def test_pathological_http_method(self):
        """
        The edge case of a http request that spoofs an existing method name is caught.
        """
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='DISPATCH')
        ).status_code, 405)

    def test_get_only(self):
        """
        Test a view which only allows GET doesn't allow other methods.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_get_and_head(self):
        """
        Test a view which supplies a GET method also responds correctly to HEAD.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        response = SimpleView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 200)

    def test_setup_get_and_head(self):
        # setup() aliases head to get when no explicit head handler exists.
        view_instance = SimpleView()
        self.assertFalse(hasattr(view_instance, 'head'))
        view_instance.setup(self.rf.get('/'))
        self.assertTrue(hasattr(view_instance, 'head'))
        self.assertEqual(view_instance.head, view_instance.get)

    def test_head_no_get(self):
        """
        Test a view which supplies no GET method responds to HEAD with HTTP 405.
        """
        response = PostOnlyView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)

    def test_get_and_post(self):
        """
        Test a view which only allows both GET and POST.
        """
        self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
        self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
        self.assertEqual(SimplePostView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_invalid_keyword_argument(self):
        """
        View arguments must be predefined on the class and can't
        be named like a HTTP method.
        """
        msg = (
            'The method name %s is not accepted as a keyword argument to '
            'SimpleView().'
        )
        # Check each of the allowed method names
        for method in SimpleView.http_method_names:
            with self.assertRaisesMessage(TypeError, msg % method):
                SimpleView.as_view(**{method: 'value'})

        # Check the case view argument is ok if predefined on the class...
        CustomizableView.as_view(parameter="value")
        # ...but raises errors otherwise.
        msg = (
            "CustomizableView() received an invalid keyword 'foobar'. "
            "as_view only accepts arguments that are already attributes of "
            "the class."
        )
        with self.assertRaisesMessage(TypeError, msg):
            CustomizableView.as_view(foobar="value")

    def test_calling_more_than_once(self):
        """
        Test a view can only be called once.
        """
        request = self.rf.get('/')
        view = InstanceView.as_view()
        # InstanceView returns self, so distinct responses prove a fresh
        # instance per call.
        self.assertNotEqual(view(request), view(request))

    def test_class_attributes(self):
        """
        The callable returned from as_view() has proper special attributes.
        """
        cls = SimpleView
        view = cls.as_view()
        self.assertEqual(view.__doc__, cls.__doc__)
        self.assertEqual(view.__name__, 'view')
        self.assertEqual(view.__module__, cls.__module__)
        self.assertEqual(view.__qualname__, f'{cls.as_view.__qualname__}.<locals>.view')
        self.assertEqual(view.__annotations__, cls.dispatch.__annotations__)
        self.assertFalse(hasattr(view, '__wrapped__'))

    def test_dispatch_decoration(self):
        """
        Attributes set by decorators on the dispatch method
        are also present on the closure.
        """
        self.assertTrue(DecoratedDispatchView.as_view().is_decorated)

    def test_options(self):
        """
        Views respond to HTTP OPTIONS requests with an Allow header
        appropriate for the methods implemented by the view class.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response.headers['Allow'])

    def test_options_for_get_view(self):
        """
        A view implementing GET allows GET and HEAD.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD')

    def test_options_for_get_and_post_view(self):
        """
        A view implementing GET and POST allows GET, HEAD, and POST.
        """
        request = self.rf.options('/')
        view = SimplePostView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD', 'POST')

    def test_options_for_post_view(self):
        """
        A view implementing POST allows POST.
        """
        request = self.rf.options('/')
        view = PostOnlyView.as_view()
        response = view(request)
        self._assert_allows(response, 'POST')

    def _assert_allows(self, response, *expected_methods):
        "Assert allowed HTTP methods reported in the Allow response header"
        response_allows = set(response.headers['Allow'].split(', '))
        # OPTIONS itself is always advertised.
        self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)

    def test_args_kwargs_request_on_self(self):
        """
        Test a view only has args, kwargs & request once `as_view`
        has been called.
        """
        bare_view = InstanceView()
        view = InstanceView.as_view()(self.rf.get('/'))
        for attribute in ('args', 'kwargs', 'request'):
            self.assertNotIn(attribute, dir(bare_view))
            self.assertIn(attribute, dir(view))

    def test_overridden_setup(self):
        # A cooperative setup() override runs before dispatch().
        class SetAttributeMixin:
            def setup(self, request, *args, **kwargs):
                self.attr = True
                super().setup(request, *args, **kwargs)

        class CheckSetupView(SetAttributeMixin, SimpleView):
            def dispatch(self, request, *args, **kwargs):
                assert hasattr(self, 'attr')
                return super().dispatch(request, *args, **kwargs)

        response = CheckSetupView.as_view()(self.rf.get('/'))
        self.assertEqual(response.status_code, 200)

    def test_not_calling_parent_setup_error(self):
        class TestView(View):
            def setup(self, request, *args, **kwargs):
                pass  # Not calling super().setup()

        msg = (
            "TestView instance has no 'request' attribute. Did you override "
            "setup() and forget to call super()?"
        )
        with self.assertRaisesMessage(AttributeError, msg):
            TestView.as_view()(self.rf.get('/'))

    def test_setup_adds_args_kwargs_request(self):
        request = self.rf.get('/')
        args = ('arg 1', 'arg 2')
        kwargs = {'kwarg_1': 1, 'kwarg_2': 'year'}

        view = View()
        view.setup(request, *args, **kwargs)
        self.assertEqual(request, view.request)
        self.assertEqual(args, view.args)
        self.assertEqual(kwargs, view.kwargs)

    def test_direct_instantiation(self):
        """
        It should be possible to use the view by directly instantiating it
        without going through .as_view() (#21564).
        """
        view = PostOnlyView()
        response = view.dispatch(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)
@override_settings(ROOT_URLCONF='generic_views.urls')
class TemplateViewTest(SimpleTestCase):
    """Tests for TemplateView: template resolution, engines, context, caching."""

    # Shared factory; requests are cheap and stateless here.
    rf = RequestFactory()

    def _assert_about(self, response):
        # Helper: the rendered response must contain the about-page heading.
        response.render()
        self.assertContains(response, '<h1>About</h1>')

    def test_get(self):
        """
        Test a view that simply renders a template on GET
        """
        self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))

    def test_head(self):
        """
        Test a TemplateView responds correctly to HEAD
        """
        response = AboutTemplateView.as_view()(self.rf.head('/about/'))
        self.assertEqual(response.status_code, 200)

    def test_get_template_attribute(self):
        """
        Test a view that renders a template on GET with the template name as
        an attribute on the class.
        """
        self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))

    def test_get_generic_template(self):
        """
        Test a completely generic view that renders a template on GET
        with the template name as an argument at instantiation.
        """
        self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))

    def test_template_name_required(self):
        """
        A template view must provide a template name.
        """
        msg = (
            "TemplateResponseMixin requires either a definition of "
            "'template_name' or an implementation of 'get_template_names()'"
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            self.client.get('/template/no_template/')

    @require_jinja2
    def test_template_engine(self):
        """
        A template view may provide a template engine.
        """
        request = self.rf.get('/using/')
        # Default engine is DTL; an explicit template_engine overrides it.
        view = TemplateView.as_view(template_name='generic_views/using.html')
        self.assertEqual(view(request).render().content, b'DTL\n')
        view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='django')
        self.assertEqual(view(request).render().content, b'DTL\n')
        view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='jinja2')
        self.assertEqual(view(request).render().content, b'Jinja2\n')

    def test_template_params(self):
        """
        A generic template view passes kwargs as context.
        """
        response = self.client.get('/template/simple/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertIsInstance(response.context['view'], View)

    def test_extra_template_params(self):
        """
        A template view can be customized to return extra context.
        """
        response = self.client.get('/template/custom/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertEqual(response.context['key'], 'value')
        self.assertIsInstance(response.context['view'], View)

    def test_cached_views(self):
        """
        A template view can be cached
        """
        response = self.client.get('/template/cached/bar/')
        self.assertEqual(response.status_code, 200)

        time.sleep(1.0)

        # Within the cache timeout the same content is served.
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)

        self.assertEqual(response.content, response2.content)

        time.sleep(2.0)

        # Let the cache expire and test again
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)

        self.assertNotEqual(response.content, response2.content)

    def test_content_type(self):
        response = self.client.get('/template/content_type/')
        self.assertEqual(response.headers['Content-Type'], 'text/plain')

    def test_resolve_view(self):
        # as_view() exposes view_class and view_initkwargs for URL resolution.
        match = resolve('/template/content_type/')
        self.assertIs(match.func.view_class, TemplateView)
        self.assertEqual(match.func.view_initkwargs['content_type'], 'text/plain')

    def test_resolve_login_required_view(self):
        # login_required wrapping keeps view_class accessible.
        match = resolve('/template/login_required/')
        self.assertIs(match.func.view_class, TemplateView)

    def test_extra_context(self):
        response = self.client.get('/template/extra_context/')
        self.assertEqual(response.context['title'], 'Title')
@override_settings(ROOT_URLCONF='generic_views.urls')
class RedirectViewTest(SimpleTestCase):
rf = RequestFactory()
def test_no_url(self):
"Without any configuration, returns HTTP 410 GONE"
response = RedirectView.as_view()(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 410)
def test_default_redirect(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_permanent_redirect(self):
"Permanent redirects are an option"
response = RedirectView.as_view(url='/bar/', permanent=True)(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, '/bar/')
def test_temporary_redirect(self):
"Temporary redirects are an option"
response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_include_args(self):
"GET arguments can be included in the redirected URL"
response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/?pork=spam')
def test_include_urlencoded_args(self):
"GET arguments can be URL-encoded when included in the redirected URL"
response = RedirectView.as_view(url='/bar/', query_string=True)(
self.rf.get('/foo/?unicode=%E2%9C%93'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93')
def test_parameter_substitution(self):
"Redirection URLs can be parameterized"
response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/42/')
def test_named_url_pattern(self):
"Named pattern parameter should reverse to the matching pattern"
response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], '/detail/artist/1/')
def test_named_url_pattern_using_args(self):
response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], '/detail/artist/1/')
def test_redirect_POST(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_HEAD(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_OPTIONS(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_PUT(self):
"Default is a temporary redirect"
response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, '/bar/')
def test_redirect_PATCH(self):
    """Default is a temporary redirect."""
    view = RedirectView.as_view(url='/bar/')
    response = view(self.rf.patch('/foo/'))
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response.url, '/bar/')
def test_redirect_DELETE(self):
    """Default is a temporary redirect."""
    view = RedirectView.as_view(url='/bar/')
    response = view(self.rf.delete('/foo/'))
    self.assertEqual(response.status_code, 302)
    self.assertEqual(response.url, '/bar/')
def test_redirect_when_meta_contains_no_query_string(self):
    """Regression for #16705."""
    # self.rf.get always sets QUERY_STRING, so build the request directly.
    request = self.rf.request(PATH_INFO='/foo/')
    response = RedirectView.as_view(url='/bar/')(request)
    self.assertEqual(response.status_code, 302)
def test_direct_instantiation(self):
    """
    The view should be usable without going through .as_view() (#21564).
    """
    view = RedirectView()
    response = view.dispatch(self.rf.head('/foo/'))
    # With no url/pattern_name configured, the view answers 410.
    self.assertEqual(response.status_code, 410)
class GetContextDataTest(SimpleTestCase):
    """Behavior of ContextMixin.get_context_data() and its overrides."""

    def test_get_context_data_super(self):
        view = views.CustomContextView()
        context = view.get_context_data(kwarg_test='kwarg_value')
        # The parent of the test view contributes the test_name key.
        self.assertIn('test_name', context)
        self.assertEqual(context['kwarg_test'], 'kwarg_value')
        self.assertEqual(context['custom_key'], 'custom_value')
        # A kwarg overrides values assigned higher up.
        context = view.get_context_data(test_name='test_value')
        self.assertEqual(context['test_name'], 'test_value')

    def test_object_at_custom_name_in_context_data(self):
        # The object is exposed under the configured context_object_name.
        view = views.CustomSingleObjectView()
        view.context_object_name = 'pony'
        context = view.get_context_data()
        self.assertEqual(context['pony'], view.object)

    def test_object_in_get_context_data(self):
        # 'object' key is always present in get_context_data()'s dict (#20234)
        view = views.CustomSingleObjectView()
        context = view.get_context_data()
        self.assertEqual(context['object'], view.object)
class UseMultipleObjectMixinTest(SimpleTestCase):
    """MultipleObjectMixin.get_context_data() queryset resolution."""

    rf = RequestFactory()

    def test_use_queryset_from_view(self):
        view = views.CustomMultipleObjectMixinView()
        view.get(self.rf.get('/'))
        # With no queryset argument, the view's own queryset is used.
        context = view.get_context_data()
        self.assertEqual(context['object_list'], view.queryset)

    def test_overwrite_queryset(self):
        view = views.CustomMultipleObjectMixinView()
        view.get(self.rf.get('/'))
        queryset = [{'name': 'Lennon'}, {'name': 'Ono'}]
        self.assertNotEqual(view.queryset, queryset)
        # The object_list kwarg takes precedence over the view's queryset.
        context = view.get_context_data(object_list=queryset)
        self.assertEqual(context['object_list'], queryset)
class SingleObjectTemplateResponseMixinTest(SimpleTestCase):
    def test_template_mixin_without_template(self):
        """
        Using a template mixin while forgetting the template must raise
        ImproperlyConfigured, not TemplateDoesNotExist.
        """
        view = views.TemplateResponseWithoutTemplate()
        expected = (
            "TemplateResponseMixin requires either a definition of "
            "'template_name' or an implementation of 'get_template_names()'"
        )
        with self.assertRaisesMessage(ImproperlyConfigured, expected):
            view.get_template_names()
| {
"content_hash": "5cf211a82c775c1482fdc019d17f09a6",
"timestamp": "",
"source": "github",
"line_count": 590,
"max_line_length": 114,
"avg_line_length": 37.60677966101695,
"alnum_prop": 0.6315575987020011,
"repo_name": "freakboy3742/django",
"id": "5872ecf3db8d40609543da50e4f0870af84c32d6",
"size": "22188",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tests/generic_views/test_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52958"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9564866"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
from functools import wraps
from datetime import datetime

from sqlalchemy import func
from flask import Blueprint, render_template, g, redirect, url_for, \
    flash, request, jsonify, make_response, abort
from flask.ext.login import login_required
from flask.ext.babel import gettext

from dataviva import db
# models
from dataviva.account.models import User
from dataviva.ask.models import Question, Status, Reply, Flag, Vote
# forms
from dataviva.admin.forms import AdminQuestionUpdateForm
from dataviva.utils.jinja_helpers import jinja_strip_html
# utils
from ..utils import send_mail
# import urllib2, urllib
# from config import SITE_MIRROR
mod = Blueprint('admin', __name__, url_prefix='/admin')
def get_current_user_role():
    """Return the role of the user attached to the current request context."""
    current_user = g.user
    return current_user.role
def required_roles(*roles):
    """Decorator factory: allow a view only for users whose role is in *roles*.

    Unauthorized users get a translated refusal message instead of the view's
    normal response.
    """
    def wrapper(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            if get_current_user_role() in roles:
                return f(*args, **kwargs)
            return gettext("You dont have permission to view this page.")
        return wrapped
    return wrapper
@mod.before_request
def before_request():
    """Tag every request handled by this blueprint as an admin page."""
    g.page_type = "admin"
###############################
# Views for ALL logged in users
# ---------------------------
@mod.route('/')
@login_required
@required_roles(1)
def admin():
    """Admin landing page: bounce straight to the user management view."""
    return redirect(url_for('.admin_users'))
@mod.route('/users/')
@login_required
@required_roles(1)
def admin_users():
    """Render the user administration page with HTTP caching disabled."""
    response = make_response(render_template("admin/admin_users.html"))
    for header, value in (
            ('Last-Modified', datetime.now()),
            ('Cache-Control',
             'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
            ('Pragma', 'no-cache')):
        response.headers.add(header, value)
    return response
@mod.route('/userslist/')
@login_required
@required_roles(1)
def admin_users_list():
    """Return a JSON page of users (everyone except the logged-in admin).

    Supports an ``offset`` query parameter for pagination; page size is a
    fixed 50.  The response is marked non-cacheable.
    """
    # Bug fix: request.args values are strings; SQLAlchemy's offset()
    # expects an integer.  Fall back to 0 on missing or garbage input.
    try:
        offset = int(request.args.get('offset', 0))
    except (TypeError, ValueError):
        offset = 0
    limit = 50
    # get all users EXCEPT the logged in user
    query = User.query.filter(User.id != g.user.id)
    items = [u.serialize() for u in query.limit(limit).offset(offset).all()]
    ret = jsonify({"activities": items})
    ret.headers.add('Last-Modified', datetime.now())
    ret.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
    ret.headers.add('Pragma', 'no-cache')
    return ret
@mod.route('/user/<int:user_id>/', methods=['PUT', 'POST'])
@required_roles(1)
def update_user(user_id):
    """Update a user's role from a JSON payload; admins (role 1) only.

    Test with:
        curl -i -H "Content-Type: application/json" -X PUT
             -d '{"role":2}' http://localhost:5000/admin/user/1
    """
    if g.user.is_authenticated() and g.user.role == 1:
        # Bug fix: was User.query.get(), which returns None for an unknown
        # id and crashed on the attribute assignment below.
        user = User.query.get_or_404(user_id)
        user.role = request.json.get('role', user.role)
        db.session.add(user)
        db.session.commit()
        return jsonify({'user': user.serialize()})
    # Bug fix: abort() was never imported at module level (NameError at
    # runtime); it is now imported from flask with the other helpers.
    abort(404)
@mod.route('/questions/')
@mod.route('/questions/<status>/')
@login_required
@required_roles(1)
def admin_questions(status=None):
    """Render the question-moderation page; default to pending questions."""
    if not status:
        return redirect(url_for(".admin_questions", status="pending"))
    response = make_response(render_template("admin/admin_questions.html"))
    for header, value in (
            ('Last-Modified', datetime.now()),
            ('Cache-Control',
             'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
            ('Pragma', 'no-cache')):
        response.headers.add(header, value)
    return response
@mod.route('/questionslist/<status>/')
@login_required
@required_roles(1)
def admin_questions_list(status=None):
    """Return a JSON page of questions in the given status, 50 at a time.

    404s when the status name is unknown.  Question bodies are stripped of
    HTML before being serialized.  The response is marked non-cacheable.
    """
    # Bug fix: request.args values are strings; SQLAlchemy's offset()
    # expects an integer.  Fall back to 0 on missing or garbage input.
    try:
        offset = int(request.args.get('offset', 0))
    except (TypeError, ValueError):
        offset = 0
    limit = 50
    curr_status = Status.query.filter_by(name=status).first_or_404()
    query = Question.query.filter_by(status=curr_status)
    items = [q.serialize() for q in query.limit(limit).offset(offset).all()]
    for item in items:
        item["question"] = jinja_strip_html(item["question"])
    ret = jsonify({"activities": items})
    ret.headers.add('Last-Modified', datetime.now())
    ret.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
    ret.headers.add('Pragma', 'no-cache')
    return ret
@mod.route('/mail/', methods=['GET', 'POST'])
def admin_mail():
    """Preview the ask-feedback e-mail template with a fixed status of "2"."""
    return render_template('admin/mail/ask_feedback.html', status="2")
@mod.route('/questions/<status>/<int:question_id>/', methods=['GET', 'POST'])
@required_roles(1)
def admin_questions_edit(status, question_id):
    """Edit/moderate one question.

    GET pre-populates the form from the stored question; POST saves the
    submitted changes, optionally e-mails the asker, and redirects back to
    the listing for the question's previous status.
    """
    q = Question.query.get_or_404(question_id)
    s = Status.query.filter_by(name=status).first_or_404()
    form = AdminQuestionUpdateForm()
    if request.method == "POST":
        # Remember where to redirect before overwriting the status below.
        previous_status = form.previous_status.data
        q.status = form.status.data
        q.status_notes = form.answer.data
        q.body = form.body.data
        q.question = form.question.data
        q.language = form.language.data
        db.session.add(q)
        db.session.commit()
        user = User.query.get(q.user_id)
        # if status is approve or rejected send email
        # NOTE(review): "2"/"3" appear to be the approved/rejected status
        # ids and agree_mailer > 0 an opt-in flag — confirm against models.
        status_id = request.form['status']
        subject = gettext('DataViva Reply')
        if (status_id == "2" or status_id == "3") and int(user.agree_mailer) > 0 :
            send_mail(subject, [user.email], render_template('admin/mail/ask_feedback.html', title=subject, status=status_id, user=user))
        flash(gettext('This question has now been updated.'))
        return redirect(url_for('.admin_questions', status=previous_status))
    # set defaults
    form.status.data = s
    form.language.data = q.language
    form.previous_status.data = s.name
    form.answer.data = q.status_notes
    form.question.data = q.question
    form.body.data = q.body
    return render_template("admin/admin_questions_edit.html",
                           question=q, status=status, form=form)
@mod.route('/questions/delete/<int:question_id>/', methods=['GET', 'POST'])
@required_roles(1)
def admin_questions_delete(question_id):
    """Delete a question, then return to the listing for its old status."""
    question = Question.query.get_or_404(question_id)
    # NOTE(review): question.status already looks like a Status object here,
    # so filtering Status by name=<Status> is suspicious — confirm intent.
    status = Status.query.filter_by(name=question.status).first()
    db.session.delete(question)
    db.session.commit()
    flash(gettext('The item was successfully deleted.'))
    return redirect(url_for(".admin_questions", status=status))
@mod.route('/replies/')
@login_required
@required_roles(1)
def admin_replies():
    """Render the reply moderation page with HTTP caching disabled."""
    response = make_response(render_template("admin/admin_replies.html"))
    for header, value in (
            ('Last-Modified', datetime.now()),
            ('Cache-Control',
             'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
            ('Pragma', 'no-cache')):
        response.headers.add(header, value)
    return response
@mod.route('/replieslist/')
@login_required
@required_roles(1)
def admin_replies_list():
    """Return a JSON page of replies with hidden == 2, newest first.

    Each serialized reply gets its HTML stripped and is annotated with the
    title of the question it belongs to.  Page size is a fixed 50, with an
    ``offset`` query parameter for pagination.
    """
    # Bug fix: request.args values are strings; SQLAlchemy's offset()
    # expects an integer.  Fall back to 0 on missing or garbage input.
    try:
        offset = int(request.args.get('offset', 0))
    except (TypeError, ValueError):
        offset = 0
    limit = 50
    reply_query = Reply.query.filter_by(hidden=2)
    replies = (reply_query
               .order_by(Reply.hidden.desc(), Reply.timestamp.desc())
               .limit(limit).offset(offset).all())
    items = [r.serialize() for r in replies]
    for item in items:
        # Bug fix: strip_html was undefined (NameError at runtime); the
        # helper is imported as jinja_strip_html at the top of the module.
        item["body"] = jinja_strip_html(item["body"])
        question = Question.query.get(item["question_id"])
        item['question_title'] = question.question
    ret = jsonify({"activities": items})
    ret.headers.add('Last-Modified', datetime.now())
    ret.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
    ret.headers.add('Pragma', 'no-cache')
    return ret
@mod.route('/replies/question/<int:question_id>/')
@login_required
@required_roles(1)
def admin_replies_question(question_id):
    """Render one question with its replies (visible first, newest first)."""
    question = Question.query.get_or_404(question_id)
    replies = (Reply.query.filter_by(question_id=question_id)
               .order_by(Reply.hidden.asc(), Reply.timestamp.desc())
               .limit(50).offset(0).all())
    question.replies = replies
    # Removed an unused User lookup and the dead commented-out assignment
    # that used it; the template only receives the question object.
    return render_template("admin/admin_replies_question.html", q=question)
@mod.route('/replieslist/question/<int:questionid>/')
@login_required
@required_roles(1)
def admin_replies_question_list(questionid):
    """Return a JSON page of one question's replies, hidden first, newest first.

    Page size is a fixed 50, with an ``offset`` query parameter for
    pagination; reply bodies are stripped of HTML before serialization.
    """
    # Bug fix: request.args values are strings; SQLAlchemy's offset()
    # expects an integer.  Fall back to 0 on missing or garbage input.
    try:
        offset = int(request.args.get('offset', 0))
    except (TypeError, ValueError):
        offset = 0
    limit = 50
    reply_query = Reply.query.filter_by(question_id=questionid)
    replies = (reply_query
               .order_by(Reply.hidden.desc(), Reply.timestamp.desc())
               .limit(limit).offset(offset).all())
    items = [r.serialize() for r in replies]
    for item in items:
        # Bug fix: strip_html was undefined (NameError at runtime); the
        # helper is imported as jinja_strip_html at the top of the module.
        item["body"] = jinja_strip_html(item["body"])
    ret = jsonify({"activities": items})
    ret.headers.add('Last-Modified', datetime.now())
    ret.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')
    ret.headers.add('Pragma', 'no-cache')
    return ret
@mod.route('/replies/delete/<int:reply_id>/')
@required_roles(1)
def delete_reply_question(reply_id):
    """Delete a reply and its votes, then return to its question's page."""
    Vote.query.filter_by(type_id=reply_id).delete()
    reply = Reply.query.get(reply_id)
    parent_question_id = reply.question_id
    db.session.delete(reply)
    db.session.commit()
    flash(gettext('The item was successfully deleted.'))
    return redirect(url_for(".admin_replies_question",
                            question_id=parent_question_id))
@mod.route('/reply/delete/<int:reply_id>/', methods=['GET'])
@required_roles(1)
def delete_reply(reply_id):
    """Delete a single reply, then return to the replies overview."""
    doomed = Reply.query.get(reply_id)
    db.session.delete(doomed)
    db.session.commit()
    flash(gettext('The item was successfully deleted.'))
    return redirect(url_for(".admin_replies"))
@mod.route('/reply/<int:reply_id>/', methods=['PUT', 'POST'])
@required_roles(1)
def update_reply(reply_id):
    """Update a reply's hidden flag from a JSON payload; admins (role 1) only.

    Test with:
        curl -i -H "Content-Type: application/json" -X PUT
             -d '{"hidden":1}' http://localhost:5000/admin/reply/1
    """
    if g.user.is_authenticated() and g.user.role == 1:
        # Bug fix: was Reply.query.get(), which returns None for an unknown
        # id and crashed on the attribute assignment below.
        reply = Reply.query.get_or_404(reply_id)
        reply.hidden = request.json.get('hidden', reply.hidden)
        db.session.add(reply)
        db.session.commit()
        return jsonify({'reply': reply.serialize()})
    # Bug fix: abort() was never imported at module level (NameError at
    # runtime); it is now imported from flask with the other helpers.
    abort(404)
| {
"content_hash": "d9e6e3838c861a7f01d5a61dc6196b22",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 137,
"avg_line_length": 29.196531791907514,
"alnum_prop": 0.6498713126113641,
"repo_name": "dogobox/datavivamaster",
"id": "1ac05a00e5538d2f6d43ddbf57d43e4c27c29dbe",
"size": "10102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dataviva/admin/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "223703"
},
{
"name": "JavaScript",
"bytes": "205565"
},
{
"name": "Python",
"bytes": "244375"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.